github.com/mhilton/juju-juju@v0.0.0-20150901100907-a94dd2c73455/worker/uniter/util_test.go

// Copyright 2012-2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package uniter_test

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/juju/errors"
	"github.com/juju/names"
	gt "github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	ft "github.com/juju/testing/filetesting"
	"github.com/juju/utils"
	utilexec "github.com/juju/utils/exec"
	"github.com/juju/utils/fslock"
	"github.com/juju/utils/proxy"
	gc "gopkg.in/check.v1"
	corecharm "gopkg.in/juju/charm.v5"
	goyaml "gopkg.in/yaml.v1"

	apiuniter "github.com/juju/juju/api/uniter"
	"github.com/juju/juju/apiserver/params"
	"github.com/juju/juju/juju/sockets"
	"github.com/juju/juju/juju/testing"
	coreleadership "github.com/juju/juju/leadership"
	"github.com/juju/juju/network"
	"github.com/juju/juju/state"
	"github.com/juju/juju/state/storage"
	"github.com/juju/juju/testcharms"
	coretesting "github.com/juju/juju/testing"
	"github.com/juju/juju/worker"
	"github.com/juju/juju/worker/leadership"
	"github.com/juju/juju/worker/uniter"
	"github.com/juju/juju/worker/uniter/charm"
	"github.com/juju/juju/worker/uniter/metrics"
	"github.com/juju/juju/worker/uniter/operation"
)

// worstCase is used for timeouts when timing out
// will fail the test. Raising this value should
// not affect the overall running time of the tests
// unless they fail.
const worstCase = coretesting.LongWait

// Assign the unit to a provisioned machine with dummy addresses set.
func assertAssignUnit(c *gc.C, st *state.State, u *state.Unit) {
	err := u.AssignToNewMachine()
	c.Assert(err, jc.ErrorIsNil)
	mid, err := u.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)
	machine, err := st.Machine(mid)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetProvisioned("i-exist", "fake_nonce", nil)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetProviderAddresses(network.Address{
		Type:  network.IPv4Address,
		Scope: network.ScopeCloudLocal,
		Value: "private.address.example.com",
	}, network.Address{
		Type:  network.IPv4Address,
		Scope: network.ScopePublic,
		Value: "public.address.example.com",
	})
	c.Assert(err, jc.ErrorIsNil)
}

type context struct {
	uuid                   string
	path                   string
	dataDir                string
	s                      *UniterSuite
	st                     *state.State
	api                    *apiuniter.State
	leaderClaimer          coreleadership.Claimer
	leaderTracker          *mockLeaderTracker
	charms                 map[string][]byte
	hooks                  []string
	sch                    *state.Charm
	svc                    *state.Service
	unit                   *state.Unit
	uniter                 *uniter.Uniter
	relatedSvc             *state.Service
	relation               *state.Relation
	relationUnits          map[string]*state.RelationUnit
	subordinate            *state.Unit
	collectMetricsTicker   *uniter.ManualTicker
	sendMetricsTicker      *uniter.ManualTicker
	updateStatusHookTicker *uniter.ManualTicker
	err                    string

	wg             sync.WaitGroup
	mu             sync.Mutex
	hooksCompleted []string
}

var _ uniter.UniterExecutionObserver = (*context)(nil)

// HookCompleted implements the UniterExecutionObserver interface.
func (ctx *context) HookCompleted(hookName string) {
	ctx.mu.Lock()
	ctx.hooksCompleted = append(ctx.hooksCompleted, hookName)
	ctx.mu.Unlock()
}

// HookFailed implements the UniterExecutionObserver interface.
func (ctx *context) HookFailed(hookName string) {
	ctx.mu.Lock()
	ctx.hooksCompleted = append(ctx.hooksCompleted, "fail-"+hookName)
	ctx.mu.Unlock()
}

func (ctx *context) setExpectedError(err string) {
	ctx.mu.Lock()
	ctx.err = err
	ctx.mu.Unlock()
}

func (ctx *context) run(c *gc.C, steps []stepper) {
	defer func() {
		if ctx.uniter != nil {
			err := ctx.uniter.Stop()
			if ctx.err == "" {
				c.Assert(err, jc.ErrorIsNil)
			} else {
				c.Assert(err, gc.ErrorMatches, ctx.err)
			}
		}
	}()
	for i, s := range steps {
		c.Logf("step %d:\n", i)
		step(c, ctx, s)
	}
}

func (ctx *context) apiLogin(c *gc.C) {
	password, err := utils.RandomPassword()
	c.Assert(err, jc.ErrorIsNil)
	err = ctx.unit.SetPassword(password)
	c.Assert(err, jc.ErrorIsNil)
	st := ctx.s.OpenAPIAs(c, ctx.unit.Tag(), password)
	c.Assert(st, gc.NotNil)
	c.Logf("API: login as %q successful", ctx.unit.Tag())
	ctx.api, err = st.Uniter()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctx.api, gc.NotNil)
	ctx.leaderClaimer = ctx.st.LeadershipClaimer()
	ctx.leaderTracker = newMockLeaderTracker(ctx)
	ctx.leaderTracker.setLeader(c, true)
}

func (ctx *context) writeExplicitHook(c *gc.C, path string, contents string) {
	err := ioutil.WriteFile(path+cmdSuffix, []byte(contents), 0755)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *context) writeHook(c *gc.C, path string, good bool) {
	hook := badHook
	if good {
		hook = goodHook
	}
	content := fmt.Sprintf(hook, filepath.Base(path))
	ctx.writeExplicitHook(c, path, content)
}

func (ctx *context) writeActions(c *gc.C, path string, names []string) {
	for _, name := range names {
		ctx.writeAction(c, path, name)
	}
}

func (ctx *context) writeMetricsYaml(c *gc.C, path string) {
	metricsYamlPath := filepath.Join(path, "metrics.yaml")
	var metricsYamlFull []byte = []byte(`
metrics:
  pings:
    type: gauge
    description: sample metric
`)
	err := ioutil.WriteFile(metricsYamlPath, []byte(metricsYamlFull), 0755)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *context) writeAction(c *gc.C, path, name string) {
	actionPath := filepath.Join(path, "actions", name)
	action := actions[name]
	err := ioutil.WriteFile(actionPath+cmdSuffix, []byte(action), 0755)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *context) writeActionsYaml(c *gc.C, path string, names ...string) {
	var actionsYaml = map[string]string{
		"base": "",
		"snapshot": `
snapshot:
  description: Take a snapshot of the database.
  params:
    outfile:
      description: "The file to write out to."
      type: string
  required: ["outfile"]
`[1:],
		"action-log": `
action-log:
`[1:],
		"action-log-fail": `
action-log-fail:
`[1:],
		"action-log-fail-error": `
action-log-fail-error:
`[1:],
		"action-reboot": `
action-reboot:
`[1:],
	}
	actionsYamlPath := filepath.Join(path, "actions.yaml")
	var actionsYamlFull string
	// Build an appropriate actions.yaml
	if names[0] != "base" {
		names = append([]string{"base"}, names...)
	}
	for _, name := range names {
		actionsYamlFull = strings.Join(
			[]string{actionsYamlFull, actionsYaml[name]}, "\n")
	}
	err := ioutil.WriteFile(actionsYamlPath, []byte(actionsYamlFull), 0755)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *context) matchHooks(c *gc.C) (match bool, overshoot bool) {
	ctx.mu.Lock()
	defer ctx.mu.Unlock()
	c.Logf("ctx.hooksCompleted: %#v", ctx.hooksCompleted)
	if len(ctx.hooksCompleted) < len(ctx.hooks) {
		return false, false
	}
	for i, e := range ctx.hooks {
		if ctx.hooksCompleted[i] != e {
			return false, false
		}
	}
	return true, len(ctx.hooksCompleted) > len(ctx.hooks)
}

type uniterTest struct {
	summary string
	steps   []stepper
}

func ut(summary string, steps ...stepper) uniterTest {
	return uniterTest{summary, steps}
}

type stepper interface {
	step(c *gc.C, ctx *context)
}

func step(c *gc.C, ctx *context, s stepper) {
	c.Logf("%#v", s)
	s.step(c, ctx)
}

type ensureStateWorker struct{}

func (s ensureStateWorker) step(c *gc.C, ctx *context) {
	addresses, err := ctx.st.Addresses()
	if err != nil || len(addresses) == 0 {
		addStateServerMachine(c, ctx.st)
	}
	addresses, err = ctx.st.APIAddressesFromMachines()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(addresses, gc.HasLen, 1)
}

func addStateServerMachine(c *gc.C, st *state.State) {
	// The AddStateServerMachine call will update the API host ports
	// to made-up addresses. We need valid addresses so that the uniter
	// can download charms from the API server.
	apiHostPorts, err := st.APIHostPorts()
	c.Assert(err, gc.IsNil)
	testing.AddStateServerMachine(c, st)
	err = st.SetAPIHostPorts(apiHostPorts)
	c.Assert(err, gc.IsNil)
}

type createCharm struct {
	revision  int
	badHooks  []string
	customize func(*gc.C, *context, string)
}

var (
	baseCharmHooks = []string{
		"install", "start", "config-changed", "upgrade-charm", "stop",
		"db-relation-joined", "db-relation-changed", "db-relation-departed",
		"db-relation-broken", "meter-status-changed", "collect-metrics", "update-status",
	}
	leaderCharmHooks = []string{
		"leader-elected", "leader-deposed", "leader-settings-changed",
	}
	storageCharmHooks = []string{
		"wp-content-storage-attached", "wp-content-storage-detaching",
	}
)

func startupHooks(minion bool) []string {
	leaderHook := "leader-elected"
	if minion {
		leaderHook = "leader-settings-changed"
	}
	return []string{"install", leaderHook, "config-changed", "start"}
}

func (s createCharm) step(c *gc.C, ctx *context) {
	base := testcharms.Repo.ClonedDirPath(c.MkDir(), "wordpress")

	allCharmHooks := baseCharmHooks
	allCharmHooks = append(allCharmHooks, leaderCharmHooks...)
	allCharmHooks = append(allCharmHooks, storageCharmHooks...)

	for _, name := range allCharmHooks {
		path := filepath.Join(base, "hooks", name)
		good := true
		for _, bad := range s.badHooks {
			if name == bad {
				good = false
			}
		}
		ctx.writeHook(c, path, good)
	}
	if s.customize != nil {
		s.customize(c, ctx, base)
	}
	dir, err := corecharm.ReadCharmDir(base)
	c.Assert(err, jc.ErrorIsNil)
	err = dir.SetDiskRevision(s.revision)
	c.Assert(err, jc.ErrorIsNil)
	step(c, ctx, addCharm{dir, curl(s.revision)})
}

func (s createCharm) charmURL() string {
	return curl(s.revision).String()
}

type addCharm struct {
	dir  *corecharm.CharmDir
	curl *corecharm.URL
}

func (s addCharm) step(c *gc.C, ctx *context) {
	var buf bytes.Buffer
	err := s.dir.ArchiveTo(&buf)
	c.Assert(err, jc.ErrorIsNil)
	body := buf.Bytes()
	hash, _, err := utils.ReadSHA256(&buf)
	c.Assert(err, jc.ErrorIsNil)

	storagePath := fmt.Sprintf("/charms/%s/%d", s.dir.Meta().Name, s.dir.Revision())
	ctx.charms[storagePath] = body
	ctx.sch, err = ctx.st.AddCharm(s.dir, s.curl, storagePath, hash)
	c.Assert(err, jc.ErrorIsNil)
}

type serveCharm struct{}

func (s serveCharm) step(c *gc.C, ctx *context) {
	storage := storage.NewStorage(ctx.st.EnvironUUID(), ctx.st.MongoSession())
	for storagePath, data := range ctx.charms {
		err := storage.Put(storagePath, bytes.NewReader(data), int64(len(data)))
		c.Assert(err, jc.ErrorIsNil)
		delete(ctx.charms, storagePath)
	}
}

type createServiceAndUnit struct {
	serviceName string
}

func (csau createServiceAndUnit) step(c *gc.C, ctx *context) {
	if csau.serviceName == "" {
		csau.serviceName = "u"
	}
	sch, err := ctx.st.Charm(curl(0))
	c.Assert(err, jc.ErrorIsNil)
	svc := ctx.s.AddTestingService(c, csau.serviceName, sch)
	unit, err := svc.AddUnit()
	c.Assert(err, jc.ErrorIsNil)

	// Assign the unit to a provisioned machine to match expected state.
	assertAssignUnit(c, ctx.st, unit)
	ctx.svc = svc
	ctx.unit = unit

	ctx.apiLogin(c)
}

type createUniter struct {
	minion       bool
	executorFunc uniter.NewExecutorFunc
}

func (s createUniter) step(c *gc.C, ctx *context) {
	step(c, ctx, ensureStateWorker{})
	step(c, ctx, createServiceAndUnit{})
	if s.minion {
		step(c, ctx, forceMinion{})
	}
	step(c, ctx, startUniter{newExecutorFunc: s.executorFunc})
	step(c, ctx, waitAddresses{})
}

type waitAddresses struct{}

func (waitAddresses) step(c *gc.C, ctx *context) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for unit addresses")
		case <-time.After(coretesting.ShortWait):
			err := ctx.unit.Refresh()
			if err != nil {
				c.Fatalf("unit refresh failed: %v", err)
			}
			// GZ 2013-07-10: Hardcoded values from dummy environ
			// special cased here, questionable.
			private, _ := ctx.unit.PrivateAddress()
			if private != "private.address.example.com" {
				continue
			}
			public, _ := ctx.unit.PublicAddress()
			if public != "public.address.example.com" {
				continue
			}
			return
		}
	}
}

type startUniter struct {
	unitTag         string
	newExecutorFunc uniter.NewExecutorFunc
}

func (s startUniter) step(c *gc.C, ctx *context) {
	if s.unitTag == "" {
		s.unitTag = "unit-u-0"
	}
	if ctx.uniter != nil {
		panic("don't start two uniters!")
	}
	if ctx.api == nil {
		panic("API connection not established")
	}
	tag, err := names.ParseUnitTag(s.unitTag)
	if err != nil {
		panic(err.Error())
	}
	locksDir := filepath.Join(ctx.dataDir, "locks")
	lock, err := fslock.NewLock(locksDir, "uniter-hook-execution")
	c.Assert(err, jc.ErrorIsNil)
	operationExecutor := operation.NewExecutor
	if s.newExecutorFunc != nil {
		operationExecutor = s.newExecutorFunc
	}

	uniterParams := uniter.UniterParams{
		UniterFacade:      ctx.api,
		UnitTag:           tag,
		LeadershipTracker: ctx.leaderTracker,
		DataDir:           ctx.dataDir,
		MachineLock:       lock,
		MetricsTimerChooser: uniter.NewTestingMetricsTimerChooser(
			ctx.collectMetricsTicker.ReturnTimer,
			ctx.sendMetricsTicker.ReturnTimer,
		),
		UpdateStatusSignal:   ctx.updateStatusHookTicker.ReturnTimer,
		NewOperationExecutor: operationExecutor,
	}
	ctx.uniter = uniter.NewUniter(&uniterParams)
	uniter.SetUniterObserver(ctx.uniter, ctx)
}

type waitUniterDead struct {
	err string
}

func (s waitUniterDead) step(c *gc.C, ctx *context) {
	if s.err != "" {
		err := s.waitDead(c, ctx)
		c.Assert(err, gc.ErrorMatches, s.err)
		return
	}

	// In the default case, we're waiting for worker.ErrTerminateAgent, but
	// the path to that error can be tricky. If the unit becomes Dead at an
	// inconvenient time, unrelated calls can fail -- as they should -- but
	// not be detected as worker.ErrTerminateAgent. In this case, we restart
	// the uniter and check that it fails as expected when starting up; this
	// mimics the behaviour of the unit agent and verifies that the UA will,
	// eventually, see the correct error and respond appropriately.
	err := s.waitDead(c, ctx)
	if err != worker.ErrTerminateAgent {
		step(c, ctx, startUniter{})
		err = s.waitDead(c, ctx)
	}
	c.Assert(err, gc.Equals, worker.ErrTerminateAgent)
	err = ctx.unit.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctx.unit.Life(), gc.Equals, state.Dead)
}

func (s waitUniterDead) waitDead(c *gc.C, ctx *context) error {
	u := ctx.uniter
	ctx.uniter = nil
	timeout := time.After(worstCase)
	for {
		// The repeated StartSync is to ensure timely completion of this method
		// in the case(s) where a state change causes a uniter action which
		// causes a state change which causes a uniter action, in which case we
		// need more than one sync. At the moment there's only one situation
		// that causes this -- setting the unit's service to Dying -- but it's
		// not an intrinsically insane pattern of action (and helps to simplify
		// the filter code) so this test seems like a small price to pay.
		ctx.s.BackingState.StartSync()
		select {
		case <-u.Dead():
			return u.Wait()
		case <-time.After(coretesting.ShortWait):
			continue
		case <-timeout:
			c.Fatalf("uniter still alive")
		}
	}
}

type stopUniter struct {
	err string
}

func (s stopUniter) step(c *gc.C, ctx *context) {
	u := ctx.uniter
	if u == nil {
		c.Logf("uniter not started, skipping stopUniter{}")
		return
	}
	ctx.uniter = nil
	err := u.Stop()
	if s.err == "" {
		c.Assert(err, jc.ErrorIsNil)
	} else {
		c.Assert(err, gc.ErrorMatches, s.err)
	}
}

type verifyWaiting struct{}

func (s verifyWaiting) step(c *gc.C, ctx *context) {
	step(c, ctx, stopUniter{})
	step(c, ctx, startUniter{})
	step(c, ctx, waitHooks{})
}

type verifyRunning struct {
	minion bool
}

func (s verifyRunning) step(c *gc.C, ctx *context) {
	step(c, ctx, stopUniter{})
	step(c, ctx, startUniter{})
	var hooks []string
	if s.minion {
		hooks = append(hooks, "leader-settings-changed")
	}
	hooks = append(hooks, "config-changed")
	step(c, ctx, waitHooks(hooks))
}

type startupErrorWithCustomCharm struct {
	badHook   string
	customize func(*gc.C, *context, string)
}

func (s startupErrorWithCustomCharm) step(c *gc.C, ctx *context) {
	step(c, ctx, createCharm{
		badHooks:  []string{s.badHook},
		customize: s.customize,
	})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{})
	step(c, ctx, waitUnitAgent{
		statusGetter: unitStatusGetter,
		status:       params.StatusError,
		info:         fmt.Sprintf(`hook failed: %q`, s.badHook),
	})
	for _, hook := range startupHooks(false) {
		if hook == s.badHook {
			step(c, ctx, waitHooks{"fail-" + hook})
			break
		}
		step(c, ctx, waitHooks{hook})
	}
	step(c, ctx, verifyCharm{})
}

type startupError struct {
	badHook string
}

func (s startupError) step(c *gc.C, ctx *context) {
	step(c, ctx, createCharm{badHooks: []string{s.badHook}})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{})
	step(c, ctx, waitUnitAgent{
		statusGetter: unitStatusGetter,
		status:       params.StatusError,
		info:         fmt.Sprintf(`hook failed: %q`, s.badHook),
	})
	for _, hook := range startupHooks(false) {
		if hook == s.badHook {
			step(c, ctx, waitHooks{"fail-" + hook})
			break
		}
		step(c, ctx, waitHooks{hook})
	}
	step(c, ctx, verifyCharm{})
}

type quickStart struct {
	minion bool
}

func (s quickStart) step(c *gc.C, ctx *context) {
	step(c, ctx, createCharm{})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{minion: s.minion})
	step(c, ctx, waitUnitAgent{status: params.StatusIdle})
	step(c, ctx, waitHooks(startupHooks(s.minion)))
	step(c, ctx, verifyCharm{})
}

type quickStartRelation struct{}

func (s quickStartRelation) step(c *gc.C, ctx *context) {
	step(c, ctx, quickStart{})
	step(c, ctx, addRelation{})
	step(c, ctx, addRelationUnit{})
	step(c, ctx, waitHooks{"db-relation-joined mysql/0 db:0", "db-relation-changed mysql/0 db:0"})
	step(c, ctx, verifyRunning{})
}

type startupRelationError struct {
	badHook string
}

func (s startupRelationError) step(c *gc.C, ctx *context) {
	step(c, ctx, createCharm{badHooks: []string{s.badHook}})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{})
	step(c, ctx, waitUnitAgent{status: params.StatusIdle})
	step(c, ctx, waitHooks(startupHooks(false)))
	step(c, ctx, verifyCharm{})
	step(c, ctx, addRelation{})
	step(c, ctx, addRelationUnit{})
}

type resolveError struct {
	resolved state.ResolvedMode
}

func (s resolveError) step(c *gc.C, ctx *context) {
	err := ctx.unit.SetResolved(s.resolved)
	c.Assert(err, jc.ErrorIsNil)
}

type statusfunc func() (state.StatusInfo, error)

type statusfuncGetter func(ctx *context) statusfunc

var unitStatusGetter = func(ctx *context) statusfunc {
	return func() (state.StatusInfo, error) {
		return ctx.unit.Status()
	}
}

var agentStatusGetter = func(ctx *context) statusfunc {
	return func() (state.StatusInfo, error) {
		return ctx.unit.AgentStatus()
	}
}

type waitUnitAgent struct {
	statusGetter func(ctx *context) statusfunc
	status       params.Status
	info         string
	data         map[string]interface{}
	charm        int
	resolved     state.ResolvedMode
}

func (s waitUnitAgent) step(c *gc.C, ctx *context) {
	if s.statusGetter == nil {
		s.statusGetter = agentStatusGetter
	}
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			err := ctx.unit.Refresh()
			if err != nil {
				c.Fatalf("cannot refresh unit: %v", err)
			}
			resolved := ctx.unit.Resolved()
			if resolved != s.resolved {
				c.Logf("want resolved mode %q, got %q; still waiting", s.resolved, resolved)
				continue
			}
			url, ok := ctx.unit.CharmURL()
			if !ok || *url != *curl(s.charm) {
				var got string
				if ok {
					got = url.String()
				}
				c.Logf("want unit charm %q, got %q; still waiting", curl(s.charm), got)
				continue
			}
			statusInfo, err := s.statusGetter(ctx)()
			c.Assert(err, jc.ErrorIsNil)
			if string(statusInfo.Status) != string(s.status) {
				c.Logf("want unit status %q, got %q; still waiting", s.status, statusInfo.Status)
				continue
			}
			if statusInfo.Message != s.info {
				c.Logf("want unit status info %q, got %q; still waiting", s.info, statusInfo.Message)
				continue
			}
			if s.data != nil {
				if len(statusInfo.Data) != len(s.data) {
					c.Logf("want %d status data value(s), got %d; still waiting", len(s.data), len(statusInfo.Data))
					continue
				}
				for key, value := range s.data {
					if statusInfo.Data[key] != value {
						c.Logf("want status data value %q for key %q, got %q; still waiting",
							value, key, statusInfo.Data[key])
						continue
					}
				}
			}
			return
		case <-timeout:
			c.Fatalf("never reached desired status")
		}
	}
}

type waitHooks []string

func (s waitHooks) step(c *gc.C, ctx *context) {
	if len(s) == 0 {
		// Give unwanted hooks a moment to run...
		ctx.s.BackingState.StartSync()
		time.Sleep(coretesting.ShortWait)
	}
	ctx.hooks = append(ctx.hooks, s...)
	c.Logf("waiting for hooks: %#v", ctx.hooks)
	match, overshoot := ctx.matchHooks(c)
	if overshoot && len(s) == 0 {
		c.Fatalf("ran more hooks than expected")
	}
	if match {
		return
	}
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			if match, _ = ctx.matchHooks(c); match {
				return
			}
		case <-timeout:
			c.Fatalf("never got expected hooks")
		}
	}
}

type actionResult struct {
	name    string
	results map[string]interface{}
	status  string
	message string
}

type waitActionResults struct {
	expectedResults []actionResult
}

func (s waitActionResults) step(c *gc.C, ctx *context) {
	resultsWatcher := ctx.st.WatchActionResults()
	defer func() {
		c.Assert(resultsWatcher.Stop(), gc.IsNil)
	}()
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			continue
		case <-timeout:
			c.Fatalf("timed out waiting for action results")
		case changes, ok := <-resultsWatcher.Changes():
			c.Logf("Got changes: %#v", changes)
			c.Assert(ok, jc.IsTrue)
			stateActionResults, err := ctx.unit.CompletedActions()
			c.Assert(err, jc.ErrorIsNil)
			if len(stateActionResults) != len(s.expectedResults) {
				continue
			}
			actualResults := make([]actionResult, len(stateActionResults))
			for i, result := range stateActionResults {
				results, message := result.Results()
				actualResults[i] = actionResult{
					name:    result.Name(),
					results: results,
					status:  string(result.Status()),
					message: message,
				}
			}
			assertActionResultsMatch(c, actualResults, s.expectedResults)
			return
		}
	}
}

func assertActionResultsMatch(c *gc.C, actualIn []actionResult, expectIn []actionResult) {
	matches := 0
	desiredMatches := len(actualIn)
	c.Assert(len(actualIn), gc.Equals, len(expectIn))
findMatch:
	for _, expectedItem := range expectIn {
		// find expectedItem in actualIn
		for j, actualItem := range actualIn {
			// If we find a match, remove both items from their
			// respective slices, increment match count, and restart.
			if reflect.DeepEqual(actualItem, expectedItem) {
				actualIn = append(actualIn[:j], actualIn[j+1:]...)
				matches++
				continue findMatch
			}
		}
		// if we finish the whole thing without finding a match, we failed.
		c.Assert(actualIn, jc.DeepEquals, expectIn)
	}

	c.Assert(matches, gc.Equals, desiredMatches)
}

type verifyNoActionResults struct{}

func (s verifyNoActionResults) step(c *gc.C, ctx *context) {
	time.Sleep(coretesting.ShortWait)
	result, err := ctx.unit.CompletedActions()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, gc.HasLen, 0)
}

type fixHook struct {
	name string
}

func (s fixHook) step(c *gc.C, ctx *context) {
	path := filepath.Join(ctx.path, "charm", "hooks", s.name)
	ctx.writeHook(c, path, true)
}

type changeMeterStatus struct {
	code string
	info string
}

func (s changeMeterStatus) step(c *gc.C, ctx *context) {
	err := ctx.unit.SetMeterStatus(s.code, s.info)
	c.Assert(err, jc.ErrorIsNil)
}

type collectMetricsTick struct {
	expectFail bool
}

func (s collectMetricsTick) step(c *gc.C, ctx *context) {
	err := ctx.collectMetricsTicker.Tick()
	if s.expectFail {
		c.Assert(err, gc.ErrorMatches, "ticker channel blocked")
	} else {
		c.Assert(err, jc.ErrorIsNil)
	}
}

type updateStatusHookTick struct{}

func (s updateStatusHookTick) step(c *gc.C, ctx *context) {
	err := ctx.updateStatusHookTicker.Tick()
	c.Assert(err, jc.ErrorIsNil)
}

type sendMetricsTick struct {
	expectFail bool
}

func (s sendMetricsTick) step(c *gc.C, ctx *context) {
	err := ctx.sendMetricsTicker.Tick()
	if s.expectFail {
		c.Assert(err, gc.ErrorMatches, "ticker channel blocked")

	} else {
		c.Assert(err, jc.ErrorIsNil)
	}
}

type addMetrics struct {
	values []string
}

func (s addMetrics) step(c *gc.C, ctx *context) {
	var declaredMetrics map[string]corecharm.Metric
	if ctx.sch.Metrics() != nil {
		declaredMetrics = ctx.sch.Metrics().Metrics
	}
	spoolDir := filepath.Join(ctx.path, "state", "spool", "metrics")

	recorder, err := metrics.NewJSONMetricRecorder(spoolDir, declaredMetrics, ctx.sch.URL().String())
	c.Assert(err, jc.ErrorIsNil)

	for _, value := range s.values {
		recorder.AddMetric("pings", value, time.Now())
	}

	err = recorder.Close()
	c.Assert(err, jc.ErrorIsNil)
}

type checkStateMetrics struct {
	number int
	values []string
}

func (s checkStateMetrics) step(c *gc.C, ctx *context) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("specified number of metric batches not received by the state server")
		case <-time.After(coretesting.ShortWait):
			batches, err := ctx.st.MetricBatches()
			c.Assert(err, jc.ErrorIsNil)
			if len(batches) != s.number {
				continue
			}
			for _, value := range s.values {
				found := false
				for _, batch := range batches {
					for _, metric := range batch.Metrics() {
						if metric.Key == "pings" && metric.Value == value {
							found = true
						}
					}
				}
				c.Assert(found, gc.Equals, true)
			}
			return
		}
	}
}

type changeConfig map[string]interface{}

func (s changeConfig) step(c *gc.C, ctx *context) {
	err := ctx.svc.UpdateConfigSettings(corecharm.Settings(s))
	c.Assert(err, jc.ErrorIsNil)
}

type addAction struct {
	name   string
	params map[string]interface{}
}

func (s addAction) step(c *gc.C, ctx *context) {
	_, err := ctx.st.EnqueueAction(ctx.unit.Tag(), s.name, s.params)
	c.Assert(err, jc.ErrorIsNil)
}

type upgradeCharm struct {
	revision int
	forced   bool
}

func (s upgradeCharm) step(c *gc.C, ctx *context) {
	curl := curl(s.revision)
	sch, err := ctx.st.Charm(curl)
	c.Assert(err, jc.ErrorIsNil)
	err = ctx.svc.SetCharm(sch, s.forced)
	c.Assert(err, jc.ErrorIsNil)
	serveCharm{}.step(c, ctx)
}

type verifyCharm struct {
	revision          int
	attemptedRevision int
	checkFiles        ft.Entries
}

func (s verifyCharm) step(c *gc.C, ctx *context) {
	s.checkFiles.Check(c, filepath.Join(ctx.path, "charm"))
	path := filepath.Join(ctx.path, "charm", "revision")
	content, err := ioutil.ReadFile(path)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(string(content), gc.Equals, strconv.Itoa(s.revision))
	checkRevision := s.revision
	if s.attemptedRevision > checkRevision {
		checkRevision = s.attemptedRevision
	}
	err = ctx.unit.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	url, ok := ctx.unit.CharmURL()
	c.Assert(ok, jc.IsTrue)
	c.Assert(url, gc.DeepEquals, curl(checkRevision))
}

type startUpgradeError struct{}

func (s startUpgradeError) step(c *gc.C, ctx *context) {
	steps := []stepper{
		createCharm{
			customize: func(c *gc.C, ctx *context, path string) {
				appendHook(c, path, "start", "chmod 555 $CHARM_DIR")
			},
		},
		serveCharm{},
		createUniter{},
		waitUnitAgent{
			status: params.StatusIdle,
		},
		waitHooks(startupHooks(false)),
		verifyCharm{},

		createCharm{revision: 1},
		serveCharm{},
		upgradeCharm{revision: 1},
		waitUnitAgent{
			statusGetter: unitStatusGetter,
			status:       params.StatusError,
			info:         "upgrade failed",
			charm:        1,
		},
		verifyWaiting{},
		verifyCharm{attemptedRevision: 1},
	}
	for _, s_ := range steps {
		step(c, ctx, s_)
	}
}

type verifyWaitingUpgradeError struct {
	revision int
}

func (s verifyWaitingUpgradeError) step(c *gc.C, ctx *context) {
	verifyCharmSteps := []stepper{
		waitUnitAgent{
			statusGetter: unitStatusGetter,
			status:       params.StatusError,
			info:         "upgrade failed",
			charm:        s.revision,
		},
		verifyCharm{attemptedRevision: s.revision},
	}
	verifyWaitingSteps := []stepper{
		stopUniter{},
		custom{func(c *gc.C, ctx *context) {
			// By setting status to Started, and waiting for the restarted uniter
			// to reset the error status, we can avoid a race in which a subsequent
			// fixUpgradeError lands just before the restarting uniter retries the
			// upgrade; and thus puts us in an unexpected state for future steps.
			ctx.unit.SetAgentStatus(state.StatusActive, "", nil)
		}},
		startUniter{},
	}
	allSteps := append(verifyCharmSteps, verifyWaitingSteps...)
	allSteps = append(allSteps, verifyCharmSteps...)
	for _, s_ := range allSteps {
		step(c, ctx, s_)
	}
}

type fixUpgradeError struct{}

func (s fixUpgradeError) step(c *gc.C, ctx *context) {
	charmPath := filepath.Join(ctx.path, "charm")
	err := os.Chmod(charmPath, 0755)
	c.Assert(err, jc.ErrorIsNil)
}

type addRelation struct {
	waitJoin bool
}

func (s addRelation) step(c *gc.C, ctx *context) {
	if ctx.relation != nil {
		panic("don't add two relations!")
	}
	if ctx.relatedSvc == nil {
		ctx.relatedSvc = ctx.s.AddTestingService(c, "mysql", ctx.s.AddTestingCharm(c, "mysql"))
	}
	eps, err := ctx.st.InferEndpoints("u", "mysql")
	c.Assert(err, jc.ErrorIsNil)
	ctx.relation, err = ctx.st.AddRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits = map[string]*state.RelationUnit{}
	if !s.waitJoin {
		return
	}

	// It's hard to do this properly (watching scope) without perturbing other tests.
	ru, err := ctx.relation.Unit(ctx.unit)
	c.Assert(err, jc.ErrorIsNil)
	timeout := time.After(worstCase)
	for {
		c.Logf("waiting to join relation")
		select {
		case <-timeout:
			c.Fatalf("failed to join relation")
		case <-time.After(coretesting.ShortWait):
			inScope, err := ru.InScope()
			c.Assert(err, jc.ErrorIsNil)
			if inScope {
				return
			}
		}
	}
}

type addRelationUnit struct{}

func (s addRelationUnit) step(c *gc.C, ctx *context) {
	u, err := ctx.relatedSvc.AddUnit()
	c.Assert(err, jc.ErrorIsNil)
	ru, err := ctx.relation.Unit(u)
	c.Assert(err, jc.ErrorIsNil)
	err = ru.EnterScope(nil)
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits[u.Name()] = ru
}

type changeRelationUnit struct {
	name string
}

func (s changeRelationUnit) step(c *gc.C, ctx *context) {
	settings, err := ctx.relationUnits[s.name].Settings()
	c.Assert(err, jc.ErrorIsNil)
	key := "madness?"
	raw, _ := settings.Get(key)
	val, _ := raw.(string)
	if val == "" {
		val = "this is juju"
	} else {
		val += "u"
	}
	settings.Set(key, val)
	_, err = settings.Write()
	c.Assert(err, jc.ErrorIsNil)
}

type removeRelationUnit struct {
	name string
}

func (s removeRelationUnit) step(c *gc.C, ctx *context) {
	err := ctx.relationUnits[s.name].LeaveScope()
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits[s.name] = nil
}

type relationState struct {
	removed bool
	life    state.Life
}

func (s relationState) step(c *gc.C, ctx *context) {
	err := ctx.relation.Refresh()
	if s.removed {
		c.Assert(err, jc.Satisfies, errors.IsNotFound)
		return
	}
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctx.relation.Life(), gc.Equals, s.life)

}

type addSubordinateRelation struct {
	ifce string
}

func (s addSubordinateRelation) step(c *gc.C, ctx *context) {
	if _, err := ctx.st.Service("logging"); errors.IsNotFound(err) {
		ctx.s.AddTestingService(c, "logging", ctx.s.AddTestingCharm(c, "logging"))
	}
	eps, err := ctx.st.InferEndpoints("logging", "u:"+s.ifce)
	c.Assert(err, jc.ErrorIsNil)
	_, err = ctx.st.AddRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
}

type removeSubordinateRelation struct {
	ifce string
}

func (s removeSubordinateRelation) step(c *gc.C, ctx *context) {
	eps, err := ctx.st.InferEndpoints("logging", "u:"+s.ifce)
	c.Assert(err, jc.ErrorIsNil)
	rel, err := ctx.st.EndpointsRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
	err = rel.Destroy()
	c.Assert(err, jc.ErrorIsNil)
}

type waitSubordinateExists struct {
	name string
}

func (s waitSubordinateExists) step(c *gc.C, ctx *context) {
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-timeout:
			c.Fatalf("subordinate was not created")
		case <-time.After(coretesting.ShortWait):
			var err error
			ctx.subordinate, err = ctx.st.Unit(s.name)
			if errors.IsNotFound(err) {
				continue
			}
			c.Assert(err, jc.ErrorIsNil)
			return
		}
	}
}

type waitSubordinateDying struct{}

func (waitSubordinateDying) step(c *gc.C, ctx *context) {
	timeout := time.After(worstCase)
	for {
		ctx.s.BackingState.StartSync()
		select {
		case <-timeout:
			c.Fatalf("subordinate was not made Dying")
		case <-time.After(coretesting.ShortWait):
			err := ctx.subordinate.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			if ctx.subordinate.Life() != state.Dying {
				continue
			}
		}
		break
	}
}

type removeSubordinate struct{}

func (removeSubordinate) step(c *gc.C, ctx *context) {
	err := ctx.subordinate.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	err = ctx.subordinate.Remove()
	c.Assert(err, jc.ErrorIsNil)
	ctx.subordinate = nil
}

type assertYaml struct {
	path   string
	expect map[string]interface{}
}

func (s assertYaml) step(c *gc.C, ctx *context) {
	data, err := ioutil.ReadFile(filepath.Join(ctx.path, s.path))
	c.Assert(err, jc.ErrorIsNil)
	actual := make(map[string]interface{})
	err = goyaml.Unmarshal(data, &actual)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(actual, gc.DeepEquals, s.expect)
}

type writeFile struct {
	path string
	mode os.FileMode
}

func (s writeFile) step(c *gc.C, ctx *context) {
	path := filepath.Join(ctx.path, s.path)
	dir := filepath.Dir(path)
	err := os.MkdirAll(dir, 0755)
	c.Assert(err, jc.ErrorIsNil)
	err = ioutil.WriteFile(path, nil, s.mode)
	c.Assert(err, jc.ErrorIsNil)
}

type chmod struct {
	path string
	mode os.FileMode
}

func (s chmod) step(c *gc.C, ctx *context) {
	path := filepath.Join(ctx.path, s.path)
	err := os.Chmod(path, s.mode)
	c.Assert(err, jc.ErrorIsNil)
}

type custom struct {
	f func(*gc.C, *context)
}

func (s custom) step(c *gc.C, ctx *context) {
	s.f(c, ctx)
}

var serviceDying = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.svc.Destroy(), gc.IsNil)
}}

var relationDying = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.relation.Destroy(), gc.IsNil)
}}

var unitDying = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.unit.Destroy(), gc.IsNil)
}}

var unitDead = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.unit.EnsureDead(), gc.IsNil)
}}

var subordinateDying = custom{func(c *gc.C, ctx *context) {
	c.Assert(ctx.subordinate.Destroy(), gc.IsNil)
}}

func curl(revision int) *corecharm.URL {
	return corecharm.MustParseURL("cs:quantal/wordpress").WithRevision(revision)
}

func appendHook(c *gc.C, charm, name, data string) {
	path := filepath.Join(charm, "hooks", name+cmdSuffix)
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0755)
	c.Assert(err, jc.ErrorIsNil)
	defer f.Close()
	_, err = f.Write([]byte(data))
	c.Assert(err, jc.ErrorIsNil)
}

func renameRelation(c *gc.C, charmPath, oldName, newName string) {
	path := filepath.Join(charmPath, "metadata.yaml")
	f, err := os.Open(path)
	c.Assert(err, jc.ErrorIsNil)
	defer f.Close()
	meta, err := corecharm.ReadMeta(f)
	c.Assert(err, jc.ErrorIsNil)

	replace := func(what map[string]corecharm.Relation) bool {
		for relName, relation := range what {
			if relName == oldName {
				what[newName] = relation
				delete(what, oldName)
				return true
			}
		}
		return false
	}
	replaced := replace(meta.Provides) || replace(meta.Requires) || replace(meta.Peers)
	c.Assert(replaced, gc.Equals, true, gc.Commentf("charm %q does not implement relation %q", charmPath, oldName))

	newmeta, err := goyaml.Marshal(meta)
	c.Assert(err, jc.ErrorIsNil)
	ioutil.WriteFile(path, newmeta, 0644)

	f, err = os.Open(path)
	c.Assert(err, jc.ErrorIsNil)
	defer f.Close()
	_, err = corecharm.ReadMeta(f)
	c.Assert(err, jc.ErrorIsNil)
}

func createHookLock(c *gc.C, dataDir string) *fslock.Lock {
	lockDir := filepath.Join(dataDir, "locks")
	lock, err := fslock.NewLock(lockDir, "uniter-hook-execution")
	c.Assert(err, jc.ErrorIsNil)
	return lock
}

type acquireHookSyncLock struct {
	message string
}

func (s acquireHookSyncLock) step(c *gc.C, ctx *context) {
	lock := createHookLock(c, ctx.dataDir)
	c.Assert(lock.IsLocked(), jc.IsFalse)
	err := lock.Lock(s.message)
	c.Assert(err, jc.ErrorIsNil)
}

var releaseHookSyncLock = custom{func(c *gc.C, ctx *context) {
	lock := createHookLock(c, ctx.dataDir)
	// Force the release.
	err := lock.BreakLock()
	c.Assert(err, jc.ErrorIsNil)
}}

var verifyHookSyncLockUnlocked = custom{func(c *gc.C, ctx *context) {
	lock := createHookLock(c, ctx.dataDir)
	c.Assert(lock.IsLocked(), jc.IsFalse)
}}

var verifyHookSyncLockLocked = custom{func(c *gc.C, ctx *context) {
	lock := createHookLock(c, ctx.dataDir)
	c.Assert(lock.IsLocked(), jc.IsTrue)
}}

type setProxySettings proxy.Settings

func (s setProxySettings) step(c *gc.C, ctx *context) {
	attrs := map[string]interface{}{
		"http-proxy":  s.Http,
		"https-proxy": s.Https,
		"ftp-proxy":   s.Ftp,
		"no-proxy":    s.NoProxy,
	}
	err := ctx.st.UpdateEnvironConfig(attrs, nil, nil)
	c.Assert(err, jc.ErrorIsNil)
}

type relationRunCommands []string

func (cmds relationRunCommands) step(c *gc.C, ctx *context) {
	commands := strings.Join(cmds, "\n")
	args := uniter.RunCommandsArgs{
		Commands:       commands,
		RelationId:     0,
		RemoteUnitName: "",
	}
	result, err := ctx.uniter.RunCommands(args)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(result.Code, gc.Equals, 0)
	c.Check(string(result.Stdout), gc.Equals, "")
	c.Check(string(result.Stderr), gc.Equals, "")
}

type runCommands []string

func (cmds runCommands) step(c *gc.C, ctx *context) {
	commands := strings.Join(cmds, "\n")
	args := uniter.RunCommandsArgs{
		Commands:       commands,
		RelationId:     -1,
		RemoteUnitName: "",
	}
	result, err := ctx.uniter.RunCommands(args)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(result.Code, gc.Equals, 0)
	c.Check(string(result.Stdout), gc.Equals, "")
	c.Check(string(result.Stderr), gc.Equals, "")
}

type asyncRunCommands []string

func (cmds asyncRunCommands) step(c *gc.C, ctx *context) {
	commands := strings.Join(cmds, "\n")
	args := uniter.RunCommandsArgs{
		Commands:       commands,
		RelationId:     -1,
		RemoteUnitName: "",
	}

	var socketPath string
	if runtime.GOOS == "windows" {
		socketPath = `\\.\pipe\unit-u-0-run`
	} else {
		socketPath = filepath.Join(ctx.path, "run.socket")
	}

	ctx.wg.Add(1)
	go func() {
		defer ctx.wg.Done()
		// make sure the socket exists
		client, err := sockets.Dial(socketPath)
		c.Assert(err, jc.ErrorIsNil)
		defer client.Close()

		var result utilexec.ExecResponse
		err = client.Call(uniter.JujuRunEndpoint, args, &result)
		c.Assert(err, jc.ErrorIsNil)
		c.Check(result.Code, gc.Equals, 0)
		c.Check(string(result.Stdout), gc.Equals, "")
		c.Check(string(result.Stderr), gc.Equals, "")
	}()
}

type waitContextWaitGroup struct{}

func (waitContextWaitGroup) step(c *gc.C, ctx *context) {
	ctx.wg.Wait()
}

type forceMinion struct{}

func (forceMinion) step(c *gc.C, ctx *context) {
	ctx.leaderTracker.setLeader(c, false)
}

type forceLeader struct{}

func (forceLeader) step(c *gc.C, ctx *context) {
	ctx.leaderTracker.setLeader(c, true)
}

func newMockLeaderTracker(ctx *context) *mockLeaderTracker {
	return &mockLeaderTracker{
		ctx: ctx,
	}
}

type mockLeaderTracker struct {
	mu       sync.Mutex
	ctx      *context
	isLeader bool
	waiting  []chan struct{}
}

func (mock *mockLeaderTracker) ServiceName() string {
	return mock.ctx.svc.Name()
}

func (mock *mockLeaderTracker) ClaimDuration() time.Duration {
	return 30 * time.Second
}

func (mock *mockLeaderTracker) ClaimLeader() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader {
		return fastTicket{true}
	}
	return fastTicket{}
}

func (mock *mockLeaderTracker) WaitLeader() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader {
		return fastTicket{}
	}
	return mock.waitTicket()
}

func (mock *mockLeaderTracker) WaitMinion() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if !mock.isLeader {
		return fastTicket{}
	}
	return mock.waitTicket()
}

func (mock *mockLeaderTracker) waitTicket() leadership.Ticket {
	// very internal, expects mu to be locked already
	ch := make(chan struct{})
	mock.waiting = append(mock.waiting, ch)
	return waitTicket{ch}
}

func (mock *mockLeaderTracker) setLeader(c *gc.C, isLeader bool) {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader == isLeader {
		return
	}
	if isLeader {
		err := mock.ctx.leaderClaimer.ClaimLeadership(
			mock.ctx.svc.Name(), mock.ctx.unit.Name(), time.Minute,
		)
		c.Assert(err, jc.ErrorIsNil)
	} else {
		leaseClock.Advance(61 * time.Second)
		time.Sleep(coretesting.ShortWait)
	}
	mock.isLeader = isLeader
	for _, ch := range mock.waiting {
		close(ch)
	}
	mock.waiting = nil
}

type waitTicket struct {
	ch chan struct{}
}

func (t waitTicket) Ready() <-chan struct{} {
	return t.ch
}

func (t waitTicket) Wait() bool {
	return false
}

type fastTicket struct {
	value bool
}

func (fastTicket) Ready() <-chan struct{} {
	ch := make(chan struct{})
	close(ch)
	return ch
}

func (t fastTicket) Wait() bool {
	return t.value
}

type setLeaderSettings map[string]string

func (s setLeaderSettings) step(c *gc.C, ctx *context) {
	// We do this directly on State, not the API, so we don't have to worry
	// about getting an API conn for whatever unit's meant to be leader.
	err := ctx.svc.UpdateLeaderSettings(successToken{}, s)
	c.Assert(err, jc.ErrorIsNil)
	ctx.s.BackingState.StartSync()
}

type successToken struct{}

func (successToken) Check(interface{}) error {
	return nil
}

type verifyLeaderSettings map[string]string

func (verify verifyLeaderSettings) step(c *gc.C, ctx *context) {
	actual, err := ctx.svc.LeaderSettings()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(actual, jc.DeepEquals, map[string]string(verify))
}

type verifyFile struct {
	filename string
	content  string
}

func (verify verifyFile) fileExists() bool {
	_, err := os.Stat(verify.filename)
	return err == nil
}

func (verify verifyFile) checkContent(c *gc.C) {
	content, err := ioutil.ReadFile(verify.filename)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(string(content), gc.Equals, verify.content)
}

func (verify verifyFile) step(c *gc.C, ctx *context) {
	if verify.fileExists() {
		verify.checkContent(c)
		return
	}
	c.Logf("waiting for file: %s", verify.filename)
	timeout := time.After(worstCase)
	for {
		select {
		case <-time.After(coretesting.ShortWait):
			if verify.fileExists() {
				verify.checkContent(c)
				return
			}
		case <-timeout:
			c.Fatalf("file does not exist")
		}
	}
}

// verify that the file does not exist
type verifyNoFile struct {
	filename string
}

func (verify verifyNoFile) step(c *gc.C, ctx *context) {
	c.Assert(verify.filename, jc.DoesNotExist)
	// Wait a short time and check again.
	time.Sleep(coretesting.ShortWait)
	c.Assert(verify.filename, jc.DoesNotExist)
}

// prepareGitUniter runs a sequence of uniter tests with the manifest deployer
// replacement logic patched out, simulating the effect of running an older
// version of juju that exclusively used a git deployer. This is useful both
// for testing the new deployer-replacement code *and* for running the old
// tests against the new, patched code to check that the tweaks made to
// accommodate the manifest deployer do not change the original behaviour as
// simulated by the patched-out code.
type prepareGitUniter struct {
	prepSteps []stepper
}

func (s prepareGitUniter) step(c *gc.C, ctx *context) {
	c.Assert(ctx.uniter, gc.IsNil, gc.Commentf("please don't try to patch stuff while the uniter's running"))
	newDeployer := func(charmPath, dataPath string, bundles charm.BundleReader) (charm.Deployer, error) {
		return charm.NewGitDeployer(charmPath, dataPath, bundles), nil
	}
	restoreNewDeployer := gt.PatchValue(&charm.NewDeployer, newDeployer)
	defer restoreNewDeployer()

	fixDeployer := func(deployer *charm.Deployer) error {
		return nil
	}
	restoreFixDeployer := gt.PatchValue(&charm.FixDeployer, fixDeployer)
	defer restoreFixDeployer()

	for _, prepStep := range s.prepSteps {
		step(c, ctx, prepStep)
	}
	if ctx.uniter != nil {
		step(c, ctx, stopUniter{})
	}
}

func ugt(summary string, steps ...stepper) uniterTest {
	return ut(summary, prepareGitUniter{steps})
}

type verifyGitCharm struct {
	revision int
	dirty    bool
}

func (s verifyGitCharm) step(c *gc.C, ctx *context) {
	charmPath := filepath.Join(ctx.path, "charm")
	if !s.dirty {
		revisionPath := filepath.Join(charmPath, "revision")
		content, err := ioutil.ReadFile(revisionPath)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(string(content), gc.Equals, strconv.Itoa(s.revision))
		err = ctx.unit.Refresh()
		c.Assert(err, jc.ErrorIsNil)
		url, ok := ctx.unit.CharmURL()
		c.Assert(ok, jc.IsTrue)
		c.Assert(url, gc.DeepEquals, curl(s.revision))
	}

	// Before we try to check the git status, make sure expected hooks are all
	// complete, to prevent the test and the uniter interfering with each other.
	step(c, ctx, waitHooks{})
	step(c, ctx, waitHooks{})
	cmd := exec.Command("git", "status")
	cmd.Dir = filepath.Join(ctx.path, "charm")
	out, err := cmd.CombinedOutput()
	c.Assert(err, jc.ErrorIsNil)
	cmp := gc.Matches
	if s.dirty {
		cmp = gc.Not(gc.Matches)
	}
	c.Assert(string(out), cmp, "(# )?On branch master\nnothing to commit.*\n")
}

type startGitUpgradeError struct{}

func (s startGitUpgradeError) step(c *gc.C, ctx *context) {
	steps := []stepper{
		createCharm{
			customize: func(c *gc.C, ctx *context, path string) {
				appendHook(c, path, "start", "echo STARTDATA > data")
			},
		},
		serveCharm{},
		createUniter{},
		waitUnitAgent{
			status: params.StatusIdle,
		},
		waitHooks(startupHooks(false)),
		verifyGitCharm{dirty: true},

		createCharm{
			revision: 1,
			customize: func(c *gc.C, ctx *context, path string) {
				ft.File{"data", "<nelson>ha ha</nelson>", 0644}.Create(c, path)
				ft.File{"ignore", "anything", 0644}.Create(c, path)
			},
		},
		serveCharm{},
		upgradeCharm{revision: 1},
		waitUnitAgent{
			statusGetter: unitStatusGetter,
			status:       params.StatusError,
			info:         "upgrade failed",
			charm:        1,
		},
		verifyWaiting{},
		verifyGitCharm{dirty: true},
	}
	for _, s_ := range steps {
		step(c, ctx, s_)
	}
}

type provisionStorage struct{}

func (s provisionStorage) step(c *gc.C, ctx *context) {
	storageAttachments, err := ctx.st.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 1)

	filesystem, err := ctx.st.StorageInstanceFilesystem(storageAttachments[0].StorageInstance())
	c.Assert(err, jc.ErrorIsNil)

	filesystemInfo := state.FilesystemInfo{
		Size:         1024,
		FilesystemId: "fs-id",
	}
	err = ctx.st.SetFilesystemInfo(filesystem.FilesystemTag(), filesystemInfo)
	c.Assert(err, jc.ErrorIsNil)

	machineId, err := ctx.unit.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)

	filesystemAttachmentInfo := state.FilesystemAttachmentInfo{
		MountPoint: "/srv/wordpress/content",
	}
	err = ctx.st.SetFilesystemAttachmentInfo(
		names.NewMachineTag(machineId),
		filesystem.FilesystemTag(),
		filesystemAttachmentInfo,
	)
	c.Assert(err, jc.ErrorIsNil)
}

type destroyStorageAttachment struct{}

func (s destroyStorageAttachment) step(c *gc.C, ctx *context) {
	storageAttachments, err := ctx.st.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 1)
	err = ctx.st.DestroyStorageAttachment(
		storageAttachments[0].StorageInstance(),
		ctx.unit.UnitTag(),
	)
	c.Assert(err, jc.ErrorIsNil)
}

type verifyStorageDetached struct{}

func (s verifyStorageDetached) step(c *gc.C, ctx *context) {
	storageAttachments, err := ctx.st.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 0)
}

type expectError struct {
	err string
}

func (s expectError) step(c *gc.C, ctx *context) {
	ctx.setExpectedError(s.err)
}