github.com/mwhudson/juju@v0.0.0-20160512215208-90ff01f3497f/worker/uniter/util_test.go

// Copyright 2012-2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package uniter_test

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/juju/errors"
	"github.com/juju/names"
	gt "github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	ft "github.com/juju/testing/filetesting"
	"github.com/juju/utils"
	"github.com/juju/utils/clock"
	utilexec "github.com/juju/utils/exec"
	"github.com/juju/utils/fslock"
	"github.com/juju/utils/proxy"
	gc "gopkg.in/check.v1"
	corecharm "gopkg.in/juju/charm.v6-unstable"
	goyaml "gopkg.in/yaml.v2"

	"github.com/juju/juju/api"
	apiuniter "github.com/juju/juju/api/uniter"
	"github.com/juju/juju/core/leadership"
	coreleadership "github.com/juju/juju/core/leadership"
	"github.com/juju/juju/juju/sockets"
	"github.com/juju/juju/juju/testing"
	"github.com/juju/juju/network"
	"github.com/juju/juju/resource/resourcetesting"
	"github.com/juju/juju/state"
	"github.com/juju/juju/state/storage"
	"github.com/juju/juju/status"
	"github.com/juju/juju/testcharms"
	coretesting "github.com/juju/juju/testing"
	"github.com/juju/juju/worker"
	"github.com/juju/juju/worker/fortress"
	"github.com/juju/juju/worker/uniter"
	"github.com/juju/juju/worker/uniter/charm"
	"github.com/juju/juju/worker/uniter/operation"
)

// worstCase is used for timeouts when timing out
// will fail the test. Raising this value should
// not affect the overall running time of the tests
// unless they fail.
const worstCase = coretesting.LongWait

// Assign the unit to a provisioned machine with dummy addresses set.
func assertAssignUnit(c *gc.C, st *state.State, u *state.Unit) {
	err := u.AssignToNewMachine()
	c.Assert(err, jc.ErrorIsNil)
	mid, err := u.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)
	machine, err := st.Machine(mid)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetProvisioned("i-exist", "fake_nonce", nil)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetProviderAddresses(network.Address{
		Type:  network.IPv4Address,
		Scope: network.ScopeCloudLocal,
		Value: "private.address.example.com",
	}, network.Address{
		Type:  network.IPv4Address,
		Scope: network.ScopePublic,
		Value: "public.address.example.com",
	})
	c.Assert(err, jc.ErrorIsNil)
}

type context struct {
	uuid                   string
	path                   string
	dataDir                string
	s                      *UniterSuite
	st                     *state.State
	api                    *apiuniter.State
	apiConn                api.Connection
	leaderClaimer          coreleadership.Claimer
	leaderTracker          *mockLeaderTracker
	charmDirGuard          *mockCharmDirGuard
	charms                 map[string][]byte
	hooks                  []string
	sch                    *state.Charm
	svc                    *state.Service
	unit                   *state.Unit
	uniter                 *uniter.Uniter
	relatedSvc             *state.Service
	relation               *state.Relation
	relationUnits          map[string]*state.RelationUnit
	subordinate            *state.Unit
	updateStatusHookTicker *manualTicker
	err                    string

	wg             sync.WaitGroup
	mu             sync.Mutex
	hooksCompleted []string
}

var _ uniter.UniterExecutionObserver = (*context)(nil)

// HookCompleted implements the UniterExecutionObserver interface.
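// The uniter calls this for each hook it runs; matchHooks later compares the
// accumulated hooksCompleted list against the hooks a test expects.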
func (ctx *context) HookCompleted(hookName string) {
	ctx.mu.Lock()
	ctx.hooksCompleted = append(ctx.hooksCompleted, hookName)
	ctx.mu.Unlock()
}

// HookFailed implements the UniterExecutionObserver interface.
func (ctx *context) HookFailed(hookName string) {
	ctx.mu.Lock()
	ctx.hooksCompleted = append(ctx.hooksCompleted, "fail-"+hookName)
	ctx.mu.Unlock()
}

func (ctx *context) setExpectedError(err string) {
	ctx.mu.Lock()
	ctx.err = err
	ctx.mu.Unlock()
}

func (ctx *context) run(c *gc.C, steps []stepper) {
	defer func() {
		if ctx.uniter != nil {
			err := worker.Stop(ctx.uniter)
			if ctx.err == "" {
				c.Assert(err, jc.ErrorIsNil)
			} else {
				c.Assert(err, gc.ErrorMatches, ctx.err)
			}
		}
	}()
	for i, s := range steps {
		c.Logf("step %d:\n", i)
		step(c, ctx, s)
	}
}

func (ctx *context) apiLogin(c *gc.C) {
	password, err := utils.RandomPassword()
	c.Assert(err, jc.ErrorIsNil)
	err = ctx.unit.SetPassword(password)
	c.Assert(err, jc.ErrorIsNil)
	apiConn := ctx.s.OpenAPIAs(c, ctx.unit.Tag(), password)
	c.Assert(apiConn, gc.NotNil)
	c.Logf("API: login as %q successful", ctx.unit.Tag())
	api, err := apiConn.Uniter()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(api, gc.NotNil)
	ctx.api = api
	ctx.apiConn = apiConn
	ctx.leaderClaimer = ctx.st.LeadershipClaimer()
	ctx.leaderTracker = newMockLeaderTracker(ctx)
	ctx.leaderTracker.setLeader(c, true)
}

func (ctx *context) writeExplicitHook(c *gc.C, path string, contents string) {
	err := ioutil.WriteFile(path+cmdSuffix, []byte(contents), 0755)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *context) writeHook(c *gc.C, path string, good bool) {
	hook := badHook
	if good {
		hook = goodHook
	}
	content := fmt.Sprintf(hook, filepath.Base(path))
	ctx.writeExplicitHook(c, path, content)
}

func (ctx *context) writeActions(c *gc.C, path string, names []string) {
	for _, name := range names {
		ctx.writeAction(c, path, name)
	}
}

func (ctx *context) writeMetricsYaml(c *gc.C, path string) {
	metricsYamlPath := filepath.Join(path, "metrics.yaml")
	var metricsYamlFull []byte = []byte(`
metrics:
  pings:
    type: gauge
    description: sample metric
`)
	err := ioutil.WriteFile(metricsYamlPath, []byte(metricsYamlFull), 0755)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *context) writeAction(c *gc.C, path, name string) {
	actionPath := filepath.Join(path, "actions", name)
	action := actions[name]
	err := ioutil.WriteFile(actionPath+cmdSuffix, []byte(action), 0755)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *context) writeActionsYaml(c *gc.C, path string, names ...string) {
	var actionsYaml = map[string]string{
		"base": "",
		"snapshot": `
snapshot:
  description: Take a snapshot of the database.
  params:
    outfile:
      description: "The file to write out to."
      type: string
  required: ["outfile"]
`[1:],
		"action-log": `
action-log:
`[1:],
		"action-log-fail": `
action-log-fail:
`[1:],
		"action-log-fail-error": `
action-log-fail-error:
`[1:],
		"action-reboot": `
action-reboot:
`[1:],
	}
	actionsYamlPath := filepath.Join(path, "actions.yaml")
	var actionsYamlFull string
	// Build an appropriate actions.yaml
	if names[0] != "base" {
		names = append([]string{"base"}, names...)
	}
	for _, name := range names {
		actionsYamlFull = strings.Join(
			[]string{actionsYamlFull, actionsYaml[name]}, "\n")
	}
	err := ioutil.WriteFile(actionsYamlPath, []byte(actionsYamlFull), 0755)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *context) matchHooks(c *gc.C) (match bool, overshoot bool) {
	ctx.mu.Lock()
	defer ctx.mu.Unlock()
	c.Logf(" actual hooks: %#v", ctx.hooksCompleted)
	c.Logf("expected hooks: %#v", ctx.hooks)
	if len(ctx.hooksCompleted) < len(ctx.hooks) {
		return false, false
	}
	for i, e := range ctx.hooks {
		if ctx.hooksCompleted[i] != e {
			return false, false
		}
	}
	return true, len(ctx.hooksCompleted) > len(ctx.hooks)
}

type uniterTest struct {
	summary string
	steps   []stepper
}

func ut(summary string, steps ...stepper) uniterTest {
	return uniterTest{summary, steps}
}

type stepper interface {
	step(c *gc.C, ctx *context)
}

func step(c *gc.C, ctx *context, s stepper) {
	c.Logf("%#v", s)
	s.step(c, ctx)
}

type ensureStateWorker struct{}

func (s ensureStateWorker) step(c *gc.C, ctx *context) {
	addresses, err := ctx.st.Addresses()
	if err != nil || len(addresses) == 0 {
		addControllerMachine(c, ctx.st)
	}
	addresses, err = ctx.st.APIAddressesFromMachines()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(addresses, gc.HasLen, 1)
}

func addControllerMachine(c *gc.C, st *state.State) {
	// The AddControllerMachine call will update the API host ports
	// to made-up addresses. We need valid addresses so that the uniter
	// can download charms from the API server.
	apiHostPorts, err := st.APIHostPorts()
	c.Assert(err, gc.IsNil)
	testing.AddControllerMachine(c, st)
	err = st.SetAPIHostPorts(apiHostPorts)
	c.Assert(err, gc.IsNil)
}

type createCharm struct {
	revision  int
	badHooks  []string
	customize func(*gc.C, *context, string)
}

var (
	baseCharmHooks = []string{
		"install", "start", "config-changed", "upgrade-charm", "stop",
		"db-relation-joined", "db-relation-changed", "db-relation-departed",
		"db-relation-broken", "meter-status-changed", "collect-metrics", "update-status",
	}
	leaderCharmHooks = []string{
		"leader-elected", "leader-deposed", "leader-settings-changed",
	}
	storageCharmHooks = []string{
		"wp-content-storage-attached", "wp-content-storage-detaching",
	}
)

func startupHooks(minion bool) []string {
	leaderHook := "leader-elected"
	if minion {
		leaderHook = "leader-settings-changed"
	}
	return []string{"install", leaderHook, "config-changed", "start"}
}

func (s createCharm) step(c *gc.C, ctx *context) {
	base := testcharms.Repo.ClonedDirPath(c.MkDir(), "wordpress")

	allCharmHooks := baseCharmHooks
	allCharmHooks = append(allCharmHooks, leaderCharmHooks...)
	allCharmHooks = append(allCharmHooks, storageCharmHooks...)
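
	// Write a script for every hook the test charm knows about; any hook
	// named in badHooks is written as a failing script.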
	for _, name := range allCharmHooks {
		path := filepath.Join(base, "hooks", name)
		good := true
		for _, bad := range s.badHooks {
			if name == bad {
				good = false
			}
		}
		ctx.writeHook(c, path, good)
	}
	if s.customize != nil {
		s.customize(c, ctx, base)
	}
	dir, err := corecharm.ReadCharmDir(base)
	c.Assert(err, jc.ErrorIsNil)
	err = dir.SetDiskRevision(s.revision)
	c.Assert(err, jc.ErrorIsNil)
	step(c, ctx, addCharm{dir, curl(s.revision)})
}

func (s createCharm) charmURL() string {
	return curl(s.revision).String()
}

type addCharm struct {
	dir  *corecharm.CharmDir
	curl *corecharm.URL
}

func (s addCharm) step(c *gc.C, ctx *context) {
	var buf bytes.Buffer
	err := s.dir.ArchiveTo(&buf)
	c.Assert(err, jc.ErrorIsNil)
	body := buf.Bytes()
	hash, _, err := utils.ReadSHA256(&buf)
	c.Assert(err, jc.ErrorIsNil)

	storagePath := fmt.Sprintf("/charms/%s/%d", s.dir.Meta().Name, s.dir.Revision())
	ctx.charms[storagePath] = body
	info := state.CharmInfo{
		Charm:       s.dir,
		ID:          s.curl,
		StoragePath: storagePath,
		SHA256:      hash,
	}

	ctx.sch, err = ctx.st.AddCharm(info)
	c.Assert(err, jc.ErrorIsNil)
}

type serveCharm struct{}

func (s serveCharm) step(c *gc.C, ctx *context) {
	storage := storage.NewStorage(ctx.st.ModelUUID(), ctx.st.MongoSession())
	for storagePath, data := range ctx.charms {
		err := storage.Put(storagePath, bytes.NewReader(data), int64(len(data)))
		c.Assert(err, jc.ErrorIsNil)
		delete(ctx.charms, storagePath)
	}
}

type createServiceAndUnit struct {
	serviceName string
}

func (csau createServiceAndUnit) step(c *gc.C, ctx *context) {
	if csau.serviceName == "" {
		csau.serviceName = "u"
	}
	sch, err := ctx.st.Charm(curl(0))
	c.Assert(err, jc.ErrorIsNil)
	svc := ctx.s.AddTestingService(c, csau.serviceName, sch)
	unit, err := svc.AddUnit()
	c.Assert(err, jc.ErrorIsNil)

	// Assign the unit to a provisioned machine to match expected state.
	assertAssignUnit(c, ctx.st, unit)
	ctx.svc = svc
	ctx.unit = unit

	ctx.apiLogin(c)
}

type createUniter struct {
	minion       bool
	executorFunc uniter.NewExecutorFunc
}

func (s createUniter) step(c *gc.C, ctx *context) {
	step(c, ctx, ensureStateWorker{})
	step(c, ctx, createServiceAndUnit{})
	if s.minion {
		step(c, ctx, forceMinion{})
	}
	step(c, ctx, startUniter{newExecutorFunc: s.executorFunc})
	step(c, ctx, waitAddresses{})
}

type waitAddresses struct{}

func (waitAddresses) step(c *gc.C, ctx *context) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for unit addresses")
		case <-time.After(coretesting.ShortWait):
			err := ctx.unit.Refresh()
			if err != nil {
				c.Fatalf("unit refresh failed: %v", err)
			}
			// GZ 2013-07-10: Hardcoded values from dummy environ
			// special cased here, questionable.
			private, _ := ctx.unit.PrivateAddress()
			if private.Value != "private.address.example.com" {
				continue
			}
			public, _ := ctx.unit.PublicAddress()
			if public.Value != "public.address.example.com" {
				continue
			}
			return
		}
	}
}

type startUniter struct {
	unitTag         string
	newExecutorFunc uniter.NewExecutorFunc
}

func (s startUniter) step(c *gc.C, ctx *context) {
	if s.unitTag == "" {
		s.unitTag = "unit-u-0"
	}
	if ctx.uniter != nil {
		panic("don't start two uniters!")
	}
	if ctx.api == nil {
		panic("API connection not established")
	}
	tag, err := names.ParseUnitTag(s.unitTag)
	if err != nil {
		panic(err.Error())
	}
	downloader := api.NewCharmDownloader(ctx.apiConn.Client())
	locksDir := filepath.Join(ctx.dataDir, "locks")
	lock, err := fslock.NewLock(locksDir, "uniter-hook-execution", fslock.Defaults())
	c.Assert(err, jc.ErrorIsNil)
	operationExecutor := operation.NewExecutor
	if s.newExecutorFunc != nil {
		operationExecutor = s.newExecutorFunc
	}

	uniterParams := uniter.UniterParams{
		UniterFacade:         ctx.api,
		UnitTag:              tag,
		LeadershipTracker:    ctx.leaderTracker,
		CharmDirGuard:        ctx.charmDirGuard,
		DataDir:              ctx.dataDir,
		Downloader:           downloader,
		MachineLock:          lock,
		UpdateStatusSignal:   ctx.updateStatusHookTicker.ReturnTimer,
		NewOperationExecutor: operationExecutor,
		Observer:             ctx,
		// TODO(axw) 2015-11-02 #1512191
		// update tests that rely on timing to advance clock
		// appropriately.
		Clock: clock.WallClock,
	}
	ctx.uniter, err = uniter.NewUniter(&uniterParams)
	c.Assert(err, jc.ErrorIsNil)
}

type waitUniterDead struct {
	err string
}

func (s waitUniterDead) step(c *gc.C, ctx *context) {
	if s.err != "" {
		err := s.waitDead(c, ctx)
		c.Assert(err, gc.ErrorMatches, s.err)
		return
	}

	// In the default case, we're waiting for worker.ErrTerminateAgent, but
	// the path to that error can be tricky. If the unit becomes Dead at an
	// inconvenient time, unrelated calls can fail -- as they should -- but
	// not be detected as worker.ErrTerminateAgent. In this case, we restart
	// the uniter and check that it fails as expected when starting up; this
	// mimics the behaviour of the unit agent and verifies that the UA will,
	// eventually, see the correct error and respond appropriately.
	err := s.waitDead(c, ctx)
	if err != worker.ErrTerminateAgent {
		step(c, ctx, startUniter{})
		err = s.waitDead(c, ctx)
	}
	c.Assert(err, gc.Equals, worker.ErrTerminateAgent)
	err = ctx.unit.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctx.unit.Life(), gc.Equals, state.Dead)
}

func (s waitUniterDead) waitDead(c *gc.C, ctx *context) error {
	u := ctx.uniter
	ctx.uniter = nil

	wait := make(chan error, 1)
	go func() {
		wait <- u.Wait()
	}()

	ctx.s.BackingState.StartSync()
	select {
	case err := <-wait:
		return err
	case <-time.After(worstCase):
		u.Kill()
		c.Fatalf("uniter still alive")
	}
	panic("unreachable")
}

type stopUniter struct {
	err string
}

func (s stopUniter) step(c *gc.C, ctx *context) {
	u := ctx.uniter
	if u == nil {
		c.Logf("uniter not started, skipping stopUniter{}")
		return
	}
	ctx.uniter = nil
	err := worker.Stop(u)
	if s.err == "" {
		c.Assert(err, jc.ErrorIsNil)
	} else {
		c.Assert(err, gc.ErrorMatches, s.err)
	}
}

type verifyWaiting struct{}

func (s verifyWaiting) step(c *gc.C, ctx *context) {
	step(c, ctx, stopUniter{})
	step(c, ctx, startUniter{})
	step(c, ctx, waitHooks{})
}

type verifyRunning struct {
	minion bool
}

func (s verifyRunning) step(c *gc.C, ctx *context) {
	step(c, ctx, stopUniter{})
	step(c, ctx, startUniter{})
	var hooks []string
	if s.minion {
		hooks = append(hooks, "leader-settings-changed")
	}
	hooks = append(hooks, "config-changed")
	step(c, ctx, waitHooks(hooks))
}

type startupErrorWithCustomCharm struct {
	badHook   string
	customize func(*gc.C, *context, string)
}

func (s startupErrorWithCustomCharm) step(c *gc.C, ctx *context) {
	step(c, ctx, createCharm{
		badHooks:  []string{s.badHook},
		customize: s.customize,
	})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{})
	step(c, ctx, waitUnitAgent{
		statusGetter: unitStatusGetter,
		status:       status.StatusError,
		info:         fmt.Sprintf(`hook failed: %q`, s.badHook),
	})
	for _, hook := range startupHooks(false) {
		if hook == s.badHook {
			step(c, ctx, waitHooks{"fail-" + hook})
			break
		}
		step(c, ctx, waitHooks{hook})
	}
	step(c, ctx, verifyCharm{})
}

type startupError struct {
	badHook string
}

func (s startupError) step(c *gc.C, ctx *context) {
	step(c, ctx, createCharm{badHooks: []string{s.badHook}})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{})
	step(c, ctx, waitUnitAgent{
		statusGetter: unitStatusGetter,
		status:       status.StatusError,
		info:         fmt.Sprintf(`hook failed: %q`, s.badHook),
	})
	for _, hook := range startupHooks(false) {
		if hook == s.badHook {
			step(c, ctx, waitHooks{"fail-" + hook})
			break
		}
		step(c, ctx, waitHooks{hook})
	}
	step(c, ctx, verifyCharm{})
}

type quickStart struct {
	minion bool
}

func (s quickStart) step(c *gc.C, ctx *context) {
	step(c, ctx, createCharm{})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{minion: s.minion})
	step(c, ctx, waitUnitAgent{status: status.StatusIdle})
	step(c, ctx, waitHooks(startupHooks(s.minion)))
	step(c, ctx, verifyCharm{})
}

type quickStartRelation struct{}

func (s quickStartRelation) step(c *gc.C, ctx *context) {
	step(c, ctx, quickStart{})
	step(c, ctx, addRelation{})
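	// Adding a remote unit to the relation should fire the -joined and
	// -changed hooks awaited below.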
	step(c, ctx, addRelationUnit{})
	step(c, ctx, waitHooks{"db-relation-joined mysql/0 db:0", "db-relation-changed mysql/0 db:0"})
	step(c, ctx, verifyRunning{})
}

type startupRelationError struct {
	badHook string
}

func (s startupRelationError) step(c *gc.C, ctx *context) {
	step(c, ctx, createCharm{badHooks: []string{s.badHook}})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{})
	step(c, ctx, waitUnitAgent{status: status.StatusIdle})
	step(c, ctx, waitHooks(startupHooks(false)))
	step(c, ctx, verifyCharm{})
	step(c, ctx, addRelation{})
	step(c, ctx, addRelationUnit{})
}

type resolveError struct {
	resolved state.ResolvedMode
}

func (s resolveError) step(c *gc.C, ctx *context) {
	err := ctx.unit.SetResolved(s.resolved)
	c.Assert(err, jc.ErrorIsNil)
}

type statusfunc func() (status.StatusInfo, error)

type statusfuncGetter func(ctx *context) statusfunc

var unitStatusGetter = func(ctx *context) statusfunc {
	return func() (status.StatusInfo, error) {
		return ctx.unit.Status()
	}
}

var agentStatusGetter = func(ctx *context) statusfunc {
	return func() (status.StatusInfo, error) {
		return ctx.unit.AgentStatus()
	}
}

type waitUnitAgent struct {
	statusGetter func(ctx *context) statusfunc
	status       status.Status
	info         string
	data         map[string]interface{}
	charm        int
	resolved     state.ResolvedMode
}

func (s waitUnitAgent) step(c *gc.C, ctx *context) {
	if s.statusGetter == nil {
		s.statusGetter = agentStatusGetter
	}
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			err := ctx.unit.Refresh()
			if err != nil {
				c.Fatalf("cannot refresh unit: %v", err)
			}
			resolved := ctx.unit.Resolved()
			if resolved != s.resolved {
				c.Logf("want resolved mode %q, got %q; still waiting", s.resolved, resolved)
				continue
			}
			url, ok := ctx.unit.CharmURL()
			if !ok || *url != *curl(s.charm) {
				var got string
				if ok {
					got = url.String()
				}
				c.Logf("want unit charm %q, got %q; still waiting", curl(s.charm), got)
				continue
			}
			statusInfo, err := s.statusGetter(ctx)()
			c.Assert(err, jc.ErrorIsNil)
			if string(statusInfo.Status) != string(s.status) {
				c.Logf("want unit status %q, got %q; still waiting", s.status, statusInfo.Status)
				continue
			}
			if statusInfo.Message != s.info {
				c.Logf("want unit status info %q, got %q; still waiting", s.info, statusInfo.Message)
				continue
			}
			if s.data != nil {
				if len(statusInfo.Data) != len(s.data) {
					c.Logf("want %d status data value(s), got %d; still waiting", len(s.data), len(statusInfo.Data))
					continue
				}
				for key, value := range s.data {
					if statusInfo.Data[key] != value {
						c.Logf("want status data value %q for key %q, got %q; still waiting",
							value, key, statusInfo.Data[key])
						continue
					}
				}
			}
			return
		case <-timeout:
			c.Fatalf("never reached desired status")
		}
	}
}

type waitHooks []string

func (s waitHooks) step(c *gc.C, ctx *context) {
	if len(s) == 0 {
		// Give unwanted hooks a moment to run...
		ctx.s.BackingState.StartSync()
		time.Sleep(coretesting.ShortWait)
	}
	ctx.hooks = append(ctx.hooks, s...)
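	// See whether the expected hooks have already completed before polling.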
	c.Logf("waiting for hooks: %#v", ctx.hooks)
	match, overshoot := ctx.matchHooks(c)
	if overshoot && len(s) == 0 {
		c.Fatalf("ran more hooks than expected")
	}
	waitExecutionLockReleased := func() {
		lock := createHookLock(c, ctx.dataDir)
		if err := lock.LockWithTimeout(worstCase, "waiting for lock"); err != nil {
			c.Fatalf("failed to acquire execution lock: %v", err)
		}
		if err := lock.Unlock(); err != nil {
			c.Fatalf("failed to release execution lock: %v", err)
		}
	}
	if match {
		if len(s) > 0 {
			// only check for lock release if there were hooks
			// run; hooks *not* running may be due to the lock
			// being held.
			waitExecutionLockReleased()
		}
		return
	}
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			if match, _ = ctx.matchHooks(c); match {
				waitExecutionLockReleased()
				return
			}
		case <-timeout:
			c.Fatalf("never got expected hooks")
		}
	}
}

type actionResult struct {
	name    string
	results map[string]interface{}
	status  string
	message string
}

type waitActionResults struct {
	expectedResults []actionResult
}

func (s waitActionResults) step(c *gc.C, ctx *context) {
	resultsWatcher := ctx.st.WatchActionResults()
	defer func() {
		c.Assert(resultsWatcher.Stop(), gc.IsNil)
	}()
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			continue
		case <-timeout:
			c.Fatalf("timed out waiting for action results")
		case changes, ok := <-resultsWatcher.Changes():
			c.Logf("Got changes: %#v", changes)
			c.Assert(ok, jc.IsTrue)
			stateActionResults, err := ctx.unit.CompletedActions()
			c.Assert(err, jc.ErrorIsNil)
			if len(stateActionResults) != len(s.expectedResults) {
				continue
			}
			actualResults := make([]actionResult, len(stateActionResults))
			for i, result := range stateActionResults {
				results, message := result.Results()
				actualResults[i] = actionResult{
					name:    result.Name(),
					results: results,
					status:  string(result.Status()),
					message: message,
				}
			}
			assertActionResultsMatch(c, actualResults, s.expectedResults)
			return
		}
	}
}

func assertActionResultsMatch(c *gc.C, actualIn []actionResult, expectIn []actionResult) {
	matches := 0
	desiredMatches := len(actualIn)
	c.Assert(len(actualIn), gc.Equals, len(expectIn))
findMatch:
	for _, expectedItem := range expectIn {
		// find expectedItem in actualIn
		for j, actualItem := range actualIn {
			// If we find a match, remove both items from their
			// respective slices, increment match count, and restart.
			if reflect.DeepEqual(actualItem, expectedItem) {
				actualIn = append(actualIn[:j], actualIn[j+1:]...)
				matches++
				continue findMatch
			}
		}
		// if we finish the whole thing without finding a match, we failed.
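		// The DeepEquals assertion below then fails and reports both slices.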
		c.Assert(actualIn, jc.DeepEquals, expectIn)
	}

	c.Assert(matches, gc.Equals, desiredMatches)
}

type verifyNoActionResults struct{}

func (s verifyNoActionResults) step(c *gc.C, ctx *context) {
	time.Sleep(coretesting.ShortWait)
	result, err := ctx.unit.CompletedActions()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, gc.HasLen, 0)
}

type fixHook struct {
	name string
}

func (s fixHook) step(c *gc.C, ctx *context) {
	path := filepath.Join(ctx.path, "charm", "hooks", s.name)
	ctx.writeHook(c, path, true)
}

type changeMeterStatus struct {
	code string
	info string
}

func (s changeMeterStatus) step(c *gc.C, ctx *context) {
	err := ctx.unit.SetMeterStatus(s.code, s.info)
	c.Assert(err, jc.ErrorIsNil)
}

type updateStatusHookTick struct{}

func (s updateStatusHookTick) step(c *gc.C, ctx *context) {
	err := ctx.updateStatusHookTicker.Tick()
	c.Assert(err, jc.ErrorIsNil)
}

type changeConfig map[string]interface{}

func (s changeConfig) step(c *gc.C, ctx *context) {
	err := ctx.svc.UpdateConfigSettings(corecharm.Settings(s))
	c.Assert(err, jc.ErrorIsNil)
}

type addAction struct {
	name   string
	params map[string]interface{}
}

func (s addAction) step(c *gc.C, ctx *context) {
	_, err := ctx.st.EnqueueAction(ctx.unit.Tag(), s.name, s.params)
	c.Assert(err, jc.ErrorIsNil)
}

type upgradeCharm struct {
	revision int
	forced   bool
}

func (s upgradeCharm) step(c *gc.C, ctx *context) {
	curl := curl(s.revision)
	sch, err := ctx.st.Charm(curl)
	c.Assert(err, jc.ErrorIsNil)
	cfg := state.SetCharmConfig{
		Charm:      sch,
		ForceUnits: s.forced,
	}
	err = ctx.svc.SetCharm(cfg)
	c.Assert(err, jc.ErrorIsNil)
	serveCharm{}.step(c, ctx)
}

type verifyCharm struct {
	revision          int
	attemptedRevision int
	checkFiles        ft.Entries
}

func (s verifyCharm) step(c *gc.C, ctx *context) {
	s.checkFiles.Check(c, filepath.Join(ctx.path, "charm"))
	path := filepath.Join(ctx.path, "charm", "revision")
	content, err := ioutil.ReadFile(path)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(string(content), gc.Equals, strconv.Itoa(s.revision))
	checkRevision := s.revision
	if s.attemptedRevision > checkRevision {
		checkRevision = s.attemptedRevision
	}
	err = ctx.unit.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	url, ok := ctx.unit.CharmURL()
	c.Assert(ok, jc.IsTrue)
	c.Assert(url, gc.DeepEquals, curl(checkRevision))
}

type pushResource struct{}

func (s pushResource) step(c *gc.C, ctx *context) {
	opened := resourcetesting.NewResource(c, &gt.Stub{}, "data", ctx.unit.ServiceName(), "the bytes")

	res, err := ctx.st.Resources()
	c.Assert(err, jc.ErrorIsNil)
	_, err = res.SetResource(
		ctx.unit.ServiceName(),
		opened.Username,
		opened.Resource.Resource,
		opened.ReadCloser,
	)
	c.Assert(err, jc.ErrorIsNil)
}

type startUpgradeError struct{}

func (s startUpgradeError) step(c *gc.C, ctx *context) {
	steps := []stepper{
		createCharm{
			customize: func(c *gc.C, ctx *context, path string) {
				appendHook(c, path, "start", "chmod 555 $CHARM_DIR")
			},
		},
		serveCharm{},
		createUniter{},
		waitUnitAgent{
			status: status.StatusIdle,
		},
		waitHooks(startupHooks(false)),
		verifyCharm{},

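		// The start hook above made the charm directory read-only, so this
		// upgrade is expected to fail.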
		createCharm{revision: 1},
		serveCharm{},
		upgradeCharm{revision: 1},
		waitUnitAgent{
			statusGetter: unitStatusGetter,
			status:       status.StatusError,
			info:         "upgrade failed",
			charm:        1,
		},
		verifyWaiting{},
		verifyCharm{attemptedRevision: 1},
	}
	for _, s_ := range steps {
		step(c, ctx, s_)
	}
}

type verifyWaitingUpgradeError struct {
	revision int
}

func (s verifyWaitingUpgradeError) step(c *gc.C, ctx *context) {
	verifyCharmSteps := []stepper{
		waitUnitAgent{
			statusGetter: unitStatusGetter,
			status:       status.StatusError,
			info:         "upgrade failed",
			charm:        s.revision,
		},
		verifyCharm{attemptedRevision: s.revision},
	}
	verifyWaitingSteps := []stepper{
		stopUniter{},
		custom{func(c *gc.C, ctx *context) {
			// By setting status to Idle, and waiting for the restarted uniter
			// to reset the error status, we can avoid a race in which a subsequent
			// fixUpgradeError lands just before the restarting uniter retries the
			// upgrade; and thus puts us in an unexpected state for future steps.
			err := ctx.unit.SetAgentStatus(status.StatusIdle, "", nil)
			c.Check(err, jc.ErrorIsNil)
		}},
		startUniter{},
	}
	allSteps := append(verifyCharmSteps, verifyWaitingSteps...)
	allSteps = append(allSteps, verifyCharmSteps...)
	for _, s_ := range allSteps {
		step(c, ctx, s_)
	}
}

type fixUpgradeError struct{}

func (s fixUpgradeError) step(c *gc.C, ctx *context) {
	charmPath := filepath.Join(ctx.path, "charm")
	err := os.Chmod(charmPath, 0755)
	c.Assert(err, jc.ErrorIsNil)
}

type addRelation struct {
	waitJoin bool
}

func (s addRelation) step(c *gc.C, ctx *context) {
	if ctx.relation != nil {
		panic("don't add two relations!")
	}
	if ctx.relatedSvc == nil {
		ctx.relatedSvc = ctx.s.AddTestingService(c, "mysql", ctx.s.AddTestingCharm(c, "mysql"))
	}
	eps, err := ctx.st.InferEndpoints("u", "mysql")
	c.Assert(err, jc.ErrorIsNil)
	ctx.relation, err = ctx.st.AddRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits = map[string]*state.RelationUnit{}
	if !s.waitJoin {
		return
	}

	// It's hard to do this properly (watching scope) without perturbing other tests.
	ru, err := ctx.relation.Unit(ctx.unit)
	c.Assert(err, jc.ErrorIsNil)
	timeout := time.After(worstCase)
	for {
		c.Logf("waiting to join relation")
		select {
		case <-timeout:
			c.Fatalf("failed to join relation")
		case <-time.After(coretesting.ShortWait):
			inScope, err := ru.InScope()
			c.Assert(err, jc.ErrorIsNil)
			if inScope {
				return
			}
		}
	}
}

type addRelationUnit struct{}

func (s addRelationUnit) step(c *gc.C, ctx *context) {
	u, err := ctx.relatedSvc.AddUnit()
	c.Assert(err, jc.ErrorIsNil)
	ru, err := ctx.relation.Unit(u)
	c.Assert(err, jc.ErrorIsNil)
	err = ru.EnterScope(nil)
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits[u.Name()] = ru
}

type changeRelationUnit struct {
	name string
}

func (s changeRelationUnit) step(c *gc.C, ctx *context) {
	settings, err := ctx.relationUnits[s.name].Settings()
	c.Assert(err, jc.ErrorIsNil)
	key := "madness?"
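	// Grow the value on every call so each change step really changes the settings.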
	raw, _ := settings.Get(key)
	val, _ := raw.(string)
	if val == "" {
		val = "this is juju"
	} else {
		val += "u"
	}
	settings.Set(key, val)
	_, err = settings.Write()
	c.Assert(err, jc.ErrorIsNil)
}

type removeRelationUnit struct {
	name string
}

func (s removeRelationUnit) step(c *gc.C, ctx *context) {
	err := ctx.relationUnits[s.name].LeaveScope()
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits[s.name] = nil
}

type relationState struct {
	removed bool
	life    state.Life
}

func (s relationState) step(c *gc.C, ctx *context) {
	err := ctx.relation.Refresh()
	if s.removed {
		c.Assert(err, jc.Satisfies, errors.IsNotFound)
		return
	}
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctx.relation.Life(), gc.Equals, s.life)
}

type addSubordinateRelation struct {
	ifce string
}

func (s addSubordinateRelation) step(c *gc.C, ctx *context) {
	if _, err := ctx.st.Service("logging"); errors.IsNotFound(err) {
		ctx.s.AddTestingService(c, "logging", ctx.s.AddTestingCharm(c, "logging"))
	}
	eps, err := ctx.st.InferEndpoints("logging", "u:"+s.ifce)
	c.Assert(err, jc.ErrorIsNil)
	_, err = ctx.st.AddRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
}

type removeSubordinateRelation struct {
	ifce string
}

func (s removeSubordinateRelation) step(c *gc.C, ctx *context) {
	eps, err := ctx.st.InferEndpoints("logging", "u:"+s.ifce)
	c.Assert(err, jc.ErrorIsNil)
	rel, err := ctx.st.EndpointsRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
	err = rel.Destroy()
	c.Assert(err, jc.ErrorIsNil)
}

type waitSubordinateExists struct {
	name string
}

func (s waitSubordinateExists) step(c *gc.C, ctx *context) {
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-timeout:
			c.Fatalf("subordinate was not created")
		case <-time.After(coretesting.ShortWait):
			var err error
			ctx.subordinate, err = ctx.st.Unit(s.name)
			if errors.IsNotFound(err) {
				continue
			}
			c.Assert(err, jc.ErrorIsNil)
			return
		}
	}
}

type waitSubordinateDying struct{}

func (waitSubordinateDying) step(c *gc.C, ctx *context) {
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-timeout:
			c.Fatalf("subordinate was not made Dying")
		case <-time.After(coretesting.ShortWait):
			err := ctx.subordinate.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			if ctx.subordinate.Life() != state.Dying {
				continue
			}
		}
		break
	}
}

type removeSubordinate struct{}

func (removeSubordinate) step(c *gc.C, ctx *context) {
	err := ctx.subordinate.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	err = ctx.subordinate.Remove()
	c.Assert(err, jc.ErrorIsNil)
	ctx.subordinate = nil
}

type assertYaml struct {
	path   string
	expect map[string]interface{}
}

func (s assertYaml) step(c *gc.C, ctx *context) {
	data, err := ioutil.ReadFile(filepath.Join(ctx.path, s.path))
	c.Assert(err, jc.ErrorIsNil)
	actual := make(map[string]interface{})
	err = goyaml.Unmarshal(data, &actual)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(actual, gc.DeepEquals, s.expect)
}

type writeFile struct {
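	// path is joined onto ctx.path by step.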
	path string
	mode os.FileMode
}

func (s writeFile) step(c *gc.C, ctx *context) {
	path := filepath.Join(ctx.path, s.path)
	dir := filepath.Dir(path)
	err := os.MkdirAll(dir, 0755)
	c.Assert(err, jc.ErrorIsNil)
	err = ioutil.WriteFile(path, nil, s.mode)
	c.Assert(err, jc.ErrorIsNil)
}

type chmod struct {
	path string
	mode os.FileMode
}

func (s chmod) step(c *gc.C, ctx *context) {
	path := filepath.Join(ctx.path, s.path)
	err := os.Chmod(path, s.mode)
	c.Assert(err, jc.ErrorIsNil)
}

type custom struct {
	f func(*gc.C, *context)
}

func (s custom) step(c *gc.C, ctx *context) {
	s.f(c, ctx)
}

var relationDying = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.relation.Destroy(), gc.IsNil)
}}

var unitDying = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.unit.Destroy(), gc.IsNil)
}}

var unitDead = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.unit.EnsureDead(), gc.IsNil)
}}

var subordinateDying = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.subordinate.Destroy(), gc.IsNil)
}}

func curl(revision int) *corecharm.URL {
	return corecharm.MustParseURL("cs:quantal/wordpress").WithRevision(revision)
}

func appendHook(c *gc.C, charm, name, data string) {
	path := filepath.Join(charm, "hooks", name+cmdSuffix)
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0755)
	c.Assert(err, jc.ErrorIsNil)
	defer f.Close()
	_, err = f.Write([]byte(data))
	c.Assert(err, jc.ErrorIsNil)
}

func renameRelation(c *gc.C, charmPath, oldName, newName string) {
	path := filepath.Join(charmPath, "metadata.yaml")
	f, err := os.Open(path)
	c.Assert(err, jc.ErrorIsNil)
	defer f.Close()
	meta, err := corecharm.ReadMeta(f)
	c.Assert(err, jc.ErrorIsNil)

	replace := func(what map[string]corecharm.Relation) bool {
		for relName, relation := range what {
			if relName == oldName {
				what[newName] = relation
				delete(what, oldName)
				return true
			}
		}
		return false
	}
	replaced := replace(meta.Provides) || replace(meta.Requires) || replace(meta.Peers)
	c.Assert(replaced, gc.Equals, true, gc.Commentf("charm %q does not implement relation %q", charmPath, oldName))

	newmeta, err := goyaml.Marshal(meta)
	c.Assert(err, jc.ErrorIsNil)
	ioutil.WriteFile(path, newmeta, 0644)

	f, err = os.Open(path)
	c.Assert(err, jc.ErrorIsNil)
	defer f.Close()
	_, err = corecharm.ReadMeta(f)
	c.Assert(err, jc.ErrorIsNil)
}

func createHookLock(c *gc.C, dataDir string) *fslock.Lock {
	lockDir := filepath.Join(dataDir, "locks")
	lock, err := fslock.NewLock(lockDir, "uniter-hook-execution", fslock.Defaults())
	c.Assert(err, jc.ErrorIsNil)
	return lock
}

type acquireHookSyncLock struct {
	message string
}

func (s acquireHookSyncLock) step(c *gc.C, ctx *context) {
	lock := createHookLock(c, ctx.dataDir)
	c.Assert(lock.IsLocked(), jc.IsFalse)
	err := lock.Lock(s.message)
	c.Assert(err, jc.ErrorIsNil)
}

var releaseHookSyncLock = custom{func(c *gc.C, ctx *context) {
	lock := createHookLock(c, ctx.dataDir)
	// Force the release.
	err := lock.BreakLock()
	c.Assert(err, jc.ErrorIsNil)
}}

var verifyHookSyncLockUnlocked = custom{func(c *gc.C, ctx *context) {
	lock := createHookLock(c, ctx.dataDir)
	c.Assert(lock.IsLocked(), jc.IsFalse)
}}

var verifyHookSyncLockLocked = custom{func(c *gc.C, ctx *context) {
	lock := createHookLock(c, ctx.dataDir)
	c.Assert(lock.IsLocked(), jc.IsTrue)
}}

type setProxySettings proxy.Settings

func (s setProxySettings) step(c *gc.C, ctx *context) {
	attrs := map[string]interface{}{
		"http-proxy":  s.Http,
		"https-proxy": s.Https,
		"ftp-proxy":   s.Ftp,
		"no-proxy":    s.NoProxy,
	}
	err := ctx.st.UpdateModelConfig(attrs, nil, nil)
	c.Assert(err, jc.ErrorIsNil)
}

type relationRunCommands []string

func (cmds relationRunCommands) step(c *gc.C, ctx *context) {
	commands := strings.Join(cmds, "\n")
	args := uniter.RunCommandsArgs{
		Commands:       commands,
		RelationId:     0,
		RemoteUnitName: "",
	}
	result, err := ctx.uniter.RunCommands(args)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(result.Code, gc.Equals, 0)
	c.Check(string(result.Stdout), gc.Equals, "")
	c.Check(string(result.Stderr), gc.Equals, "")
}

type runCommands []string

func (cmds runCommands) step(c *gc.C, ctx *context) {
	commands := strings.Join(cmds, "\n")
	args := uniter.RunCommandsArgs{
		Commands:       commands,
		RelationId:     -1,
		RemoteUnitName: "",
	}
	result, err := ctx.uniter.RunCommands(args)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(result.Code, gc.Equals, 0)
	c.Check(string(result.Stdout), gc.Equals, "")
	c.Check(string(result.Stderr), gc.Equals, "")
}

type asyncRunCommands []string

func (cmds asyncRunCommands) step(c *gc.C, ctx *context) {
	commands := strings.Join(cmds, "\n")
	args := uniter.RunCommandsArgs{
		Commands:       commands,
		RelationId:     -1,
		RemoteUnitName: "",
	}

	var socketPath string
	if runtime.GOOS == "windows" {
		socketPath = `\\.\pipe\unit-u-0-run`
	} else {
		socketPath = filepath.Join(ctx.path, "run.socket")
	}

	ctx.wg.Add(1)
	go func() {
		defer ctx.wg.Done()
		// make sure the socket exists
		client, err := sockets.Dial(socketPath)
		// Don't use asserts in go routines.
		if !c.Check(err, jc.ErrorIsNil) {
			return
		}
		defer client.Close()

		var result utilexec.ExecResponse
		err = client.Call(uniter.JujuRunEndpoint, args, &result)
		if c.Check(err, jc.ErrorIsNil) {
			c.Check(result.Code, gc.Equals, 0)
			c.Check(string(result.Stdout), gc.Equals, "")
			c.Check(string(result.Stderr), gc.Equals, "")
		}
	}()
}

type waitContextWaitGroup struct{}

func (waitContextWaitGroup) step(c *gc.C, ctx *context) {
	ctx.wg.Wait()
}

type forceMinion struct{}

func (forceMinion) step(c *gc.C, ctx *context) {
	ctx.leaderTracker.setLeader(c, false)
}

type forceLeader struct{}

func (forceLeader) step(c *gc.C, ctx *context) {
	ctx.leaderTracker.setLeader(c, true)
}

func newMockLeaderTracker(ctx *context) *mockLeaderTracker {
	return &mockLeaderTracker{
		ctx: ctx,
	}
}

type mockLeaderTracker struct {
	mu       sync.Mutex
	ctx      *context
	isLeader bool
	waiting  []chan struct{}
}

func (mock *mockLeaderTracker) ServiceName() string {
	return mock.ctx.svc.Name()
}

func (mock *mockLeaderTracker) ClaimDuration() time.Duration {
	return 30 * time.Second
}

func (mock *mockLeaderTracker) ClaimLeader() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader {
		return fastTicket{true}
	}
	return fastTicket{}
}

func (mock *mockLeaderTracker) WaitLeader() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader {
		return fastTicket{}
	}
	return mock.waitTicket()
}

func (mock *mockLeaderTracker) WaitMinion() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if !mock.isLeader {
		return fastTicket{}
	}
	return mock.waitTicket()
}

func (mock *mockLeaderTracker) waitTicket() leadership.Ticket {
	// very internal, expects mu to be locked already
	ch := make(chan struct{})
	mock.waiting = append(mock.waiting, ch)
	return waitTicket{ch}
}

func (mock *mockLeaderTracker) setLeader(c *gc.C, isLeader bool) {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader == isLeader {
		return
	}
	if isLeader {
		err := mock.ctx.leaderClaimer.ClaimLeadership(
			mock.ctx.svc.Name(), mock.ctx.unit.Name(), time.Minute,
		)
		c.Assert(err, jc.ErrorIsNil)
	} else {
		leaseClock.Advance(61 * time.Second)
		time.Sleep(coretesting.ShortWait)
	}
	mock.isLeader = isLeader
	for _, ch := range mock.waiting {
		close(ch)
	}
	mock.waiting = nil
}

type waitTicket struct {
	ch chan struct{}
}

func (t waitTicket) Ready() <-chan struct{} {
	return t.ch
}

func (t waitTicket) Wait() bool {
	return false
}

type fastTicket struct {
	value bool
}

func (fastTicket) Ready() <-chan struct{} {
	ch := make(chan struct{})
	close(ch)
	return ch
}

func (t fastTicket) Wait() bool {
	return t.value
}

type setLeaderSettings map[string]string

func (s setLeaderSettings) step(c *gc.C, ctx *context) {
	// We do this directly on State, not the API, so we don't have to worry
	// about getting an API conn for whatever unit's meant to be leader.
	err := ctx.svc.UpdateLeaderSettings(successToken{}, s)
	c.Assert(err, jc.ErrorIsNil)
	ctx.s.BackingState.StartSync()
}

type successToken struct{}

func (successToken) Check(interface{}) error {
	return nil
}

type verifyLeaderSettings map[string]string

func (verify verifyLeaderSettings) step(c *gc.C, ctx *context) {
	actual, err := ctx.svc.LeaderSettings()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(actual, jc.DeepEquals, map[string]string(verify))
}

type verifyFile struct {
	filename string
	content  string
}

func (verify verifyFile) fileExists() bool {
	_, err := os.Stat(verify.filename)
	return err == nil
}

func (verify verifyFile) checkContent(c *gc.C) {
	content, err := ioutil.ReadFile(verify.filename)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(string(content), gc.Equals, verify.content)
}

func (verify verifyFile) step(c *gc.C, ctx *context) {
	if verify.fileExists() {
		verify.checkContent(c)
		return
	}
	c.Logf("waiting for file: %s", verify.filename)
	timeout := time.After(worstCase)
	for {
		select {
		case <-time.After(coretesting.ShortWait):
			if verify.fileExists() {
				verify.checkContent(c)
				return
			}
		case <-timeout:
			c.Fatalf("file does not exist")
		}
	}
}

// verify that the file does not exist
type verifyNoFile struct {
	filename string
}

func (verify verifyNoFile) step(c *gc.C, ctx *context) {
	c.Assert(verify.filename, jc.DoesNotExist)
	// Wait a short time and check again.
	time.Sleep(coretesting.ShortWait)
	c.Assert(verify.filename, jc.DoesNotExist)
}

type mockCharmDirGuard struct{}

// Unlock implements fortress.Guard.
func (*mockCharmDirGuard) Unlock() error { return nil }

// Lockdown implements fortress.Guard.
func (*mockCharmDirGuard) Lockdown(_ fortress.Abort) error { return nil }

// prepareGitUniter runs a sequence of uniter tests with the manifest deployer
// replacement logic patched out, simulating the effect of running an older
// version of juju that exclusively used a git deployer. This is useful both
// for testing the new deployer-replacement code *and* for running the old
// tests against the new, patched code to check that the tweaks made to
// accommodate the manifest deployer do not change the original behaviour as
// simulated by the patched-out code.
type prepareGitUniter struct {
	prepSteps []stepper
}

func (s prepareGitUniter) step(c *gc.C, ctx *context) {
	c.Assert(ctx.uniter, gc.IsNil, gc.Commentf("please don't try to patch stuff while the uniter's running"))
	newDeployer := func(charmPath, dataPath string, bundles charm.BundleReader) (charm.Deployer, error) {
		return charm.NewGitDeployer(charmPath, dataPath, bundles), nil
	}
	restoreNewDeployer := gt.PatchValue(&charm.NewDeployer, newDeployer)
	defer restoreNewDeployer()

	fixDeployer := func(deployer *charm.Deployer) error {
		return nil
	}
	restoreFixDeployer := gt.PatchValue(&charm.FixDeployer, fixDeployer)
	defer restoreFixDeployer()

	for _, prepStep := range s.prepSteps {
		step(c, ctx, prepStep)
	}
	if ctx.uniter != nil {
		step(c, ctx, stopUniter{})
	}
}

func ugt(summary string, steps ...stepper) uniterTest {
	return ut(summary, prepareGitUniter{steps})
}

type verifyGitCharm struct {
	revision int
	dirty    bool
}

func (s verifyGitCharm) step(c *gc.C, ctx *context) {
	charmPath := filepath.Join(ctx.path, "charm")
	if !s.dirty {
		revisionPath := filepath.Join(charmPath, "revision")
		content, err := ioutil.ReadFile(revisionPath)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(string(content), gc.Equals, strconv.Itoa(s.revision))
		err = ctx.unit.Refresh()
		c.Assert(err, jc.ErrorIsNil)
		url, ok := ctx.unit.CharmURL()
		c.Assert(ok, jc.IsTrue)
		c.Assert(url, gc.DeepEquals, curl(s.revision))
	}

	// Before we try to check the git status, make sure expected hooks are all
	// complete, to prevent the test and the uniter interfering with each other.
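	// An empty waitHooks also gives any unexpected hooks a moment to run,
	// and fails the test if one did.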
	step(c, ctx, waitHooks{})
	step(c, ctx, waitHooks{})
	cmd := exec.Command("git", "status")
	cmd.Dir = filepath.Join(ctx.path, "charm")
	out, err := cmd.CombinedOutput()
	c.Assert(err, jc.ErrorIsNil)
	cmp := gc.Matches
	if s.dirty {
		cmp = gc.Not(gc.Matches)
	}
	c.Assert(string(out), cmp, "(# )?On branch master\nnothing to commit.*\n")
}

type startGitUpgradeError struct{}

func (s startGitUpgradeError) step(c *gc.C, ctx *context) {
	steps := []stepper{
		createCharm{
			customize: func(c *gc.C, ctx *context, path string) {
				appendHook(c, path, "start", "echo STARTDATA > data")
			},
		},
		serveCharm{},
		createUniter{},
		waitUnitAgent{
			status: status.StatusIdle,
		},
		waitHooks(startupHooks(false)),
		verifyGitCharm{dirty: true},

		createCharm{
			revision: 1,
			customize: func(c *gc.C, ctx *context, path string) {
				ft.File{"data", "<nelson>ha ha</nelson>", 0644}.Create(c, path)
				ft.File{"ignore", "anything", 0644}.Create(c, path)
			},
		},
		serveCharm{},
		upgradeCharm{revision: 1},
		waitUnitAgent{
			statusGetter: unitStatusGetter,
			status:       status.StatusError,
			info:         "upgrade failed",
			charm:        1,
		},
		verifyWaiting{},
		verifyGitCharm{dirty: true},
	}
	for _, s_ := range steps {
		step(c, ctx, s_)
	}
}

type provisionStorage struct{}

func (s provisionStorage) step(c *gc.C, ctx *context) {
	storageAttachments, err := ctx.st.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 1)

	filesystem, err := ctx.st.StorageInstanceFilesystem(storageAttachments[0].StorageInstance())
	c.Assert(err, jc.ErrorIsNil)

	filesystemInfo := state.FilesystemInfo{
		Size:         1024,
		FilesystemId: "fs-id",
	}
	err = ctx.st.SetFilesystemInfo(filesystem.FilesystemTag(), filesystemInfo)
	c.Assert(err, jc.ErrorIsNil)

	machineId, err := ctx.unit.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)

	filesystemAttachmentInfo := state.FilesystemAttachmentInfo{
		MountPoint: "/srv/wordpress/content",
	}
	err = ctx.st.SetFilesystemAttachmentInfo(
		names.NewMachineTag(machineId),
		filesystem.FilesystemTag(),
		filesystemAttachmentInfo,
	)
	c.Assert(err, jc.ErrorIsNil)
}

type destroyStorageAttachment struct{}

func (s destroyStorageAttachment) step(c *gc.C, ctx *context) {
	storageAttachments, err := ctx.st.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 1)
	err = ctx.st.DestroyStorageAttachment(
		storageAttachments[0].StorageInstance(),
		ctx.unit.UnitTag(),
	)
	c.Assert(err, jc.ErrorIsNil)
}

type verifyStorageDetached struct{}

func (s verifyStorageDetached) step(c *gc.C, ctx *context) {
	storageAttachments, err := ctx.st.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 0)
}

type expectError struct {
	err string
}

func (s expectError) step(c *gc.C, ctx *context) {
	ctx.setExpectedError(s.err)
}

// manualTicker will be used to generate update-status events
// in a time-independent manner for testing.
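// Tests drive it through the updateStatusHookTick step, which calls Tick.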
type manualTicker struct {
	c chan time.Time
}

// Tick sends a signal on the ticker channel.
func (t *manualTicker) Tick() error {
	select {
	case t.c <- time.Now():
	case <-time.After(worstCase):
		return fmt.Errorf("ticker channel blocked")
	}
	return nil
}

// ReturnTimer can be used to replace the update status signal generator.
func (t *manualTicker) ReturnTimer() <-chan time.Time {
	return t.c
}

func newManualTicker() *manualTicker {
	return &manualTicker{
		c: make(chan time.Time),
	}
}
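
// Example composition (a sketch, not one of the real test tables): uniter
// tests are built by combining the steppers above into a uniterTest via ut,
// and context.run executes the steps in order. All names used here are
// defined in this file; the scenario itself is illustrative only.
var exampleUpgradeTest = ut(
	"example: install and start, then upgrade to revision 1",
	quickStart{},
	createCharm{revision: 1},
	serveCharm{},
	upgradeCharm{revision: 1},
	waitHooks{"upgrade-charm", "config-changed"},
	verifyCharm{revision: 1},
)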