github.com/juju/juju@v0.0.0-20240327075706-a90865de2538/worker/upgradedatabase/worker_test.go

// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package upgradedatabase_test

import (
	"fmt"
	"time"

	"github.com/juju/clock"
	"github.com/juju/errors"
	"github.com/juju/loggo"
	"github.com/juju/names/v5"
	"github.com/juju/retry"
	"github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	"github.com/juju/version/v2"
	"github.com/juju/worker/v3/workertest"
	"go.uber.org/mock/gomock"
	gc "gopkg.in/check.v1"

	"github.com/juju/juju/agent"
	"github.com/juju/juju/core/status"
	"github.com/juju/juju/state"
	"github.com/juju/juju/upgrades"
	jujuversion "github.com/juju/juju/version"
	"github.com/juju/juju/worker/upgradedatabase"
	. "github.com/juju/juju/worker/upgradedatabase/mocks"
)

var (
	statusUpgrading = "upgrading database for " + jujuversion.Current.String()
	statusWaiting   = "waiting on primary database upgrade for " + jujuversion.Current.String()
	statusCompleted = fmt.Sprintf("database upgrade for %v completed", jujuversion.Current)
	statusConfirmed = "confirmed primary database upgrade for " + jujuversion.Current.String()

	logRunning = "running database upgrade for %v on mongodb primary"
	logWaiting = "waiting for database upgrade on mongodb primary"
)

// baseSuite is embedded in both the worker and manifold tests.
// Tests should not go on this suite directly.
type baseSuite struct {
	testing.IsolationSuite

	logger *MockLogger
}

// ignoreLogging turns the suite's mock logger into a sink, with no validation.
// Logs are still emitted via the test logger.
func (s *baseSuite) ignoreLogging(c *gc.C) {
	debugIt := func(message string, args ...interface{}) { logIt(c, loggo.DEBUG, message, args) }
	infoIt := func(message string, args ...interface{}) { logIt(c, loggo.INFO, message, args) }
	errorIt := func(message string, args ...interface{}) { logIt(c, loggo.ERROR, message, args) }

	e := s.logger.EXPECT()
	e.Debugf(gomock.Any(), gomock.Any()).AnyTimes().Do(debugIt)
	e.Infof(gomock.Any(), gomock.Any()).AnyTimes().Do(infoIt)
	e.Errorf(gomock.Any(), gomock.Any()).AnyTimes().Do(errorIt)
}

func logIt(c *gc.C, level loggo.Level, message string, args interface{}) {
	var nArgs []interface{}
	var ok bool
	if nArgs, ok = args.([]interface{}); ok {
		nArgs = append([]interface{}{level}, nArgs...)
	} else {
		nArgs = append([]interface{}{level}, args)
	}

	c.Logf("%s "+message, nArgs...)
}
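
// Tests that need to assert on a specific log message, rather than sink all
// logging, set an expectation on the individual logger method instead of
// calling ignoreLogging, for example:
//
//	s.logger.EXPECT().Infof(logWaiting)
//
// as the timeout and retry tests below do.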

type workerSuite struct {
	baseSuite

	lock        *MockLock
	agent       *MockAgent
	agentCfg    *MockConfig
	cfgSetter   *MockConfigSetter
	pool        *MockPool
	clock       *MockClock
	upgradeInfo *MockUpgradeInfo
	watcher     *MockNotifyWatcher
}

var _ = gc.Suite(&workerSuite{})

func (s *workerSuite) TestValidateConfig(c *gc.C) {
	defer s.setupMocks(c).Finish()

	cfg := s.getConfig()
	c.Check(cfg.Validate(), jc.ErrorIsNil)
	cfg.Tag = names.NewControllerAgentTag("0")
	c.Check(cfg.Validate(), jc.ErrorIsNil)

	cfg.UpgradeComplete = nil
	c.Check(cfg.Validate(), jc.Satisfies, errors.IsNotValid)

	cfg = s.getConfig()
	cfg.Tag = nil
	c.Check(cfg.Validate(), jc.Satisfies, errors.IsNotValid)

	cfg = s.getConfig()
	cfg.Agent = nil
	c.Check(cfg.Validate(), jc.Satisfies, errors.IsNotValid)

	cfg = s.getConfig()
	cfg.Logger = nil
	c.Check(cfg.Validate(), jc.Satisfies, errors.IsNotValid)

	cfg = s.getConfig()
	cfg.OpenState = nil
	c.Check(cfg.Validate(), jc.Satisfies, errors.IsNotValid)

	cfg = s.getConfig()
	cfg.PerformUpgrade = nil
	c.Check(cfg.Validate(), jc.Satisfies, errors.IsNotValid)

	cfg = s.getConfig()
	cfg.RetryStrategy = retry.CallArgs{}
	c.Check(cfg.Validate(), jc.Satisfies, errors.IsNotValid)

	cfg = s.getConfig()
	cfg.Clock = nil
	c.Check(cfg.Validate(), jc.Satisfies, errors.IsNotValid)
}

func (s *workerSuite) TestNewLockSameVersionUnlocked(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	s.agentCfg.EXPECT().UpgradedToVersion().Return(jujuversion.Current)
	c.Assert(upgradedatabase.NewLock(s.agentCfg).IsUnlocked(), jc.IsTrue)
}

func (s *workerSuite) TestNewLockOldVersionLocked(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	s.agentCfg.EXPECT().UpgradedToVersion().Return(version.Number{})
	c.Assert(upgradedatabase.NewLock(s.agentCfg).IsUnlocked(), jc.IsFalse)
}

func (s *workerSuite) TestAlreadyCompleteNoWork(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	s.lock.EXPECT().IsUnlocked().Return(true)

	w, err := upgradedatabase.NewWorker(s.getConfig())
	c.Assert(err, jc.ErrorIsNil)

	workertest.CleanKill(c, w)
}

func (s *workerSuite) TestAlreadyUpgradedNoWork(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	s.lock.EXPECT().IsUnlocked().Return(false)
	s.agent.EXPECT().CurrentConfig().Return(s.agentCfg)
	s.agentCfg.EXPECT().UpgradedToVersion().Return(jujuversion.Current)
	s.lock.EXPECT().Unlock()

	w, err := upgradedatabase.NewWorker(s.getConfig())
	c.Assert(err, jc.ErrorIsNil)

	workertest.CleanKill(c, w)
}

func (s *workerSuite) TestNotPrimaryWatchForCompletionSuccess(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	s.expectUpgradeRequired(false)

	s.pool.EXPECT().SetStatus("0", status.Started, statusWaiting)

	// Expect a watcher that will fire a change for the initial event
	// and then a change for the watch loop.
	s.upgradeInfo.EXPECT().Watch().Return(s.watcher)
	changes := make(chan struct{}, 2)
	changes <- struct{}{}
	changes <- struct{}{}
	s.watcher.EXPECT().Changes().Return(changes).MinTimes(1)

	// Initial state is UpgradePending.
	s.upgradeInfo.EXPECT().Refresh().Return(nil).MinTimes(1)
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradePending)
	// After the first change is retrieved from the channel above,
	// we then say the upgrade is complete.
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradeDBComplete)

	s.pool.EXPECT().SetStatus("0", status.Started, statusConfirmed)

	// We don't want to kill the worker while we are in the status observation
	// loop, so we gate on this final expectation.
	finished := make(chan struct{})
	s.lock.EXPECT().Unlock().Do(func() {
		close(finished)
	})

	w, err := upgradedatabase.NewWorker(s.getConfig())
	c.Assert(err, jc.ErrorIsNil)

	select {
	case <-finished:
	case <-time.After(testing.LongWait):
	}
	workertest.CleanKill(c, w)
}

func (s *workerSuite) TestNotPrimaryWatchForCompletionSuccessRunning(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	s.expectUpgradeRequired(false)

	s.pool.EXPECT().SetStatus("0", status.Started, statusWaiting)

	// Expect a watcher that will fire a change for the initial event
	// and then a change for the watch loop.
	s.upgradeInfo.EXPECT().Watch().Return(s.watcher)
	changes := make(chan struct{}, 2)
	changes <- struct{}{}
	changes <- struct{}{}
	s.watcher.EXPECT().Changes().Return(changes).MinTimes(1)

	// Initial state is UpgradePending.
	s.upgradeInfo.EXPECT().Refresh().Return(nil).MinTimes(1)
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradePending)
	// After the first change is retrieved from the channel above,
	// we then say the upgrade has moved on to running (non-db) steps.
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradeRunning)

	s.pool.EXPECT().SetStatus("0", status.Started, statusConfirmed)

	// We don't want to kill the worker while we are in the status observation
	// loop, so we gate on this final expectation.
	finished := make(chan struct{})
	s.lock.EXPECT().Unlock().Do(func() {
		close(finished)
	})

	w, err := upgradedatabase.NewWorker(s.getConfig())
	c.Assert(err, jc.ErrorIsNil)

	select {
	case <-finished:
	case <-time.After(testing.LongWait):
	}
	workertest.CleanKill(c, w)
}

func (s *workerSuite) TestNotPrimaryWatchForCompletionTimeout(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectUpgradeRequired(false)

	s.logger.EXPECT().Infof(logWaiting)
	s.pool.EXPECT().SetStatus("0", status.Started, statusWaiting)

	// Expect a watcher that will fire a change for the initial event
	// and then a change for the watch loop.
	s.upgradeInfo.EXPECT().Watch().Return(s.watcher)

	// Have changes available for the first couple of loops, but then stop,
	// so that the timeout select case is guaranteed to fire.
	changes := make(chan struct{}, 2)
	changes <- struct{}{}
	changes <- struct{}{}
	s.watcher.EXPECT().Changes().Return(changes).AnyTimes()

	timeout := make(chan time.Time, 1)
	s.clock.EXPECT().After(10 * time.Minute).Return(timeout)

	neverTimeout := make(chan time.Time)
	s.clock.EXPECT().After(5 * time.Second).Return(neverTimeout).MaxTimes(1)

	// Primary does not complete the upgrade.
	// After we have gone to the upgrade pending state, trip the timeout.
	s.upgradeInfo.EXPECT().Refresh().Return(nil).AnyTimes()

	// Don't time out on the first few checks of status.
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradePending).Times(2)
	s.upgradeInfo.EXPECT().Status().DoAndReturn(func() state.UpgradeStatus {
		// We only care about queueing one in the buffer.
		// Carry on if we're jammed up - we'll fail elsewhere.
		select {
		case timeout <- time.Now():
		default:
		}
		return state.UpgradePending
	}).AnyTimes()

	// We don't want to kill the worker while we are in the status observation
	// loop, so we gate on this final expectation.
	finished := make(chan struct{})
	s.pool.EXPECT().SetStatus("0", status.Error, statusUpgrading).Do(func(string, status.Status, string) {
		close(finished)
	})

	// Note that UpgradeComplete is not unlocked.

	cfg := s.getConfig()
	cfg.Clock = s.clock

	w, err := upgradedatabase.NewWorker(cfg)
	c.Assert(err, jc.ErrorIsNil)

	select {
	case <-finished:
	case <-time.After(testing.LongWait):
	}
	workertest.DirtyKill(c, w)
}

func (s *workerSuite) TestNotPrimaryButPrimaryFinished(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	s.expectUpgradeRequired(false)

	s.pool.EXPECT().SetStatus("0", status.Started, statusWaiting)

	// Expect the watcher to be created, and then the status to be examined.
	// Because the status is already complete, there are no calls to the
	// watcher's Changes channel.
	s.upgradeInfo.EXPECT().Watch().Return(s.watcher)
	// Primary already completed the upgrade.
	s.upgradeInfo.EXPECT().Refresh().Return(nil).MinTimes(1)
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradeDBComplete)

	s.pool.EXPECT().SetStatus("0", status.Started, statusConfirmed)

	// We don't want to kill the worker while we are in the status observation
	// loop, so we gate on this final expectation.
	finished := make(chan struct{})
	s.lock.EXPECT().Unlock().Do(func() {
		close(finished)
	})

	w, err := upgradedatabase.NewWorker(s.getConfig())
	c.Assert(err, jc.ErrorIsNil)

	select {
	case <-finished:
	case <-time.After(testing.LongWait):
	}
	workertest.CleanKill(c, w)
}

func (s *workerSuite) TestNotPrimaryButBecomePrimary(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	// The first IsPrimary check returns false.
	s.expectUpgradeRequired(false)

	// We don't want to kill the worker while we are in the status observation
	// loop, so we gate on this final expectation.
	finished := make(chan struct{})
	s.pool.EXPECT().IsPrimary("0").DoAndReturn(func(_ interface{}) (bool, error) {
		// The second IsPrimary check returns true and marks completion.
		close(finished)
		return true, nil
	})

	s.pool.EXPECT().SetStatus("0", status.Started, statusWaiting)

	s.upgradeInfo.EXPECT().Watch().Return(s.watcher)
	s.upgradeInfo.EXPECT().Refresh().Return(nil).MinTimes(1)
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradePending)

	// The first clock.After returns the timeout channel.
	// After that, return a channel used to wait before re-checking the primary.
	timeout := make(chan time.Time, 1)
	checkPrimary := make(chan time.Time, 1)
	checkPrimary <- time.Now()
	s.clock.EXPECT().After(10 * time.Minute).Return(timeout)
	s.clock.EXPECT().After(5 * time.Second).Return(checkPrimary)

	changes := make(chan struct{}, 1)
	s.watcher.EXPECT().Changes().Return(changes)

	cfg := s.getConfig()
	cfg.Clock = s.clock

	w, err := upgradedatabase.NewWorker(cfg)
	c.Assert(err, jc.ErrorIsNil)

	select {
	case <-finished:
	case <-time.After(testing.LongWait):
	}
	workertest.DirtyKill(c, w)
}

func (s *workerSuite) TestNotPrimaryButBecomePrimaryByError(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	// The first IsPrimary check returns false.
	s.expectUpgradeRequired(false)

	// We don't want to kill the worker while we are in the status observation
	// loop, so we gate on this final expectation.
	finished := make(chan struct{})
	s.pool.EXPECT().IsPrimary("0").DoAndReturn(func(_ interface{}) (bool, error) {
		// The second IsPrimary check returns an error, which also marks completion.
		close(finished)
		return false, errors.New("Primary has changed")
	})

	s.pool.EXPECT().SetStatus("0", status.Started, statusWaiting)

	s.upgradeInfo.EXPECT().Watch().Return(s.watcher)
	s.upgradeInfo.EXPECT().Refresh().Return(nil).MinTimes(1)
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradePending)

	// The first clock.After returns the timeout channel.
	// After that, return a channel used to wait before re-checking the primary.
	timeout := make(chan time.Time, 1)
	checkPrimary := make(chan time.Time, 1)
	checkPrimary <- time.Now()
	s.clock.EXPECT().After(10 * time.Minute).Return(timeout)
	s.clock.EXPECT().After(5 * time.Second).Return(checkPrimary)

	changes := make(chan struct{}, 1)
	s.watcher.EXPECT().Changes().Return(changes)

	cfg := s.getConfig()
	cfg.Clock = s.clock

	w, err := upgradedatabase.NewWorker(cfg)
	c.Assert(err, jc.ErrorIsNil)

	select {
	case <-finished:
	case <-time.After(testing.LongWait):
	}
	workertest.DirtyKill(c, w)
}

func (s *workerSuite) TestNotPrimaryButBecomePrimaryAfter2Checks(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	// The first IsPrimary check returns false.
	s.expectUpgradeRequired(false)
	// So does the second.
	s.pool.EXPECT().IsPrimary("0").Return(false, nil)

	// We don't want to kill the worker while we are in the status observation
	// loop, so we gate on this final expectation.
	finished := make(chan struct{})
	s.pool.EXPECT().IsPrimary("0").DoAndReturn(func(_ interface{}) (bool, error) {
		// The third IsPrimary check returns true and marks completion.
		close(finished)
		return true, nil
	})

	s.pool.EXPECT().SetStatus("0", status.Started, statusWaiting)

	s.upgradeInfo.EXPECT().Watch().Return(s.watcher)
	s.upgradeInfo.EXPECT().Refresh().Return(nil).MinTimes(1)
	s.upgradeInfo.EXPECT().Status().Return(state.UpgradePending)

	// The first clock.After returns the timeout channel.
	// After that, return a channel used to wait before re-checking the primary.
	timeout := make(chan time.Time, 1)
	checkPrimary := make(chan time.Time, 2)
	checkPrimary <- time.Now()
	checkPrimary <- time.Now()

	s.clock.EXPECT().After(10 * time.Minute).Return(timeout)
	s.clock.EXPECT().After(5 * time.Second).Return(checkPrimary).Times(2)

	changes := make(chan struct{}, 1)
	s.watcher.EXPECT().Changes().Return(changes)

	cfg := s.getConfig()
	cfg.Clock = s.clock

	w, err := upgradedatabase.NewWorker(cfg)
	c.Assert(err, jc.ErrorIsNil)

	select {
	case <-finished:
	case <-time.After(testing.LongWait):
	}
	workertest.DirtyKill(c, w)
}

func (s *workerSuite) TestUpgradedSuccessFirst(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.ignoreLogging(c)

	s.expectUpgradeRequired(true)
	s.expectExecution()

	s.upgradeInfo.EXPECT().SetStatus(state.UpgradeDBComplete).Return(nil)
	s.pool.EXPECT().SetStatus("0", status.Started, statusUpgrading)
	s.pool.EXPECT().SetStatus("0", status.Started, statusCompleted)

	s.lock.EXPECT().Unlock()

	w, err := upgradedatabase.NewWorker(s.getConfig())
	c.Assert(err, jc.ErrorIsNil)

	workertest.CleanKill(c, w)
}

func (s *workerSuite) TestUpgradedRetryThenSuccess(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectUpgradeRequired(true)
	s.expectExecution()

	s.logger.EXPECT().Infof(logRunning, jujuversion.Current)
	s.pool.EXPECT().SetStatus("0", status.Started, statusUpgrading)

	cfg := s.getConfig()
	msg := "database upgrade from %v to %v for %q failed (%s): %v"
	s.logger.EXPECT().Errorf(msg, version.Number{}, jujuversion.Current, cfg.Tag, "will retry", gomock.Any())

	s.pool.EXPECT().SetStatus("0", status.Error, statusUpgrading)

	s.upgradeInfo.EXPECT().SetStatus(state.UpgradeDBComplete).Return(nil)
	s.logger.EXPECT().Infof("database upgrade for %v completed successfully.", jujuversion.Current)
	s.pool.EXPECT().SetStatus("0", status.Started, statusCompleted)

	s.lock.EXPECT().Unlock()

	var failedOnce bool
	cfg.PerformUpgrade = func(ver version.Number, targets []upgrades.Target, ctx func() upgrades.Context) error {
		c.Check(ver, gc.Equals, version.Number{})
		c.Check(targets, gc.DeepEquals, []upgrades.Target{upgrades.DatabaseMaster})

		if !failedOnce {
			failedOnce = true
			return errors.New("boom")
		}
		return nil
	}

	w, err := upgradedatabase.NewWorker(cfg)
	c.Assert(err, jc.ErrorIsNil)

	workertest.CleanKill(c, w)
}

func (s *workerSuite) TestUpgradedRetryAllFailed(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectUpgradeRequired(true)
	s.expectExecution()

	s.logger.EXPECT().Infof(logRunning, jujuversion.Current)
	s.pool.EXPECT().SetStatus("0", status.Started, statusUpgrading)

	cfg := s.getConfig()
	msg := "database upgrade from %v to %v for %q failed (%s): %v"
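	// getConfig sets a RetryStrategy with three attempts, so the failure is
	// expected to be logged with "will retry" for the earlier attempts and
	// with "giving up" for the final one.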
	s.logger.EXPECT().Errorf(msg, version.Number{}, jujuversion.Current, cfg.Tag, "will retry", gomock.Any()).MinTimes(1)
	s.logger.EXPECT().Errorf(msg, version.Number{}, jujuversion.Current, cfg.Tag, "giving up", gomock.Any())

	s.pool.EXPECT().SetStatus("0", status.Error, statusUpgrading).MinTimes(1)

	// Note that UpgradeComplete is not unlocked.

	cfg.PerformUpgrade = func(ver version.Number, targets []upgrades.Target, ctx func() upgrades.Context) error {
		c.Check(ver, gc.Equals, version.Number{})
		c.Check(targets, gc.DeepEquals, []upgrades.Target{upgrades.DatabaseMaster})
		return errors.New("boom")
	}

	w, err := upgradedatabase.NewWorker(cfg)
	c.Assert(err, jc.ErrorIsNil)

	workertest.DirtyKill(c, w)
}

func (s *workerSuite) getConfig() upgradedatabase.Config {
	return upgradedatabase.Config{
		UpgradeComplete: s.lock,
		Tag:             names.NewMachineTag("0"),
		Agent:           s.agent,
		Logger:          s.logger,
		OpenState:       func() (upgradedatabase.Pool, error) { return s.pool, nil },
		PerformUpgrade:  func(version.Number, []upgrades.Target, func() upgrades.Context) error { return nil },
		RetryStrategy:   retry.CallArgs{Clock: clock.WallClock, Delay: time.Millisecond, Attempts: 3},
		Clock:           clock.WallClock,
	}
}

func (s *workerSuite) setupMocks(c *gc.C) *gomock.Controller {
	ctrl := gomock.NewController(c)

	s.lock = NewMockLock(ctrl)
	s.agent = NewMockAgent(ctrl)
	s.agentCfg = NewMockConfig(ctrl)
	s.cfgSetter = NewMockConfigSetter(ctrl)
	s.logger = NewMockLogger(ctrl)
	s.clock = NewMockClock(ctrl)
	s.upgradeInfo = NewMockUpgradeInfo(ctrl)

	s.pool = NewMockPool(ctrl)
	s.pool.EXPECT().Close().Return(nil).MaxTimes(1)

	s.watcher = NewMockNotifyWatcher(ctrl)
	s.watcher.EXPECT().Stop().Return(nil).MaxTimes(1)

	return ctrl
}

// expectUpgradeRequired sets expectations for a scenario where a database
// upgrade needs to be run.
// The input bool simulates whether we are running the primary MongoDB.
func (s *workerSuite) expectUpgradeRequired(isPrimary bool) {
	fromVersion := version.Number{}

	s.lock.EXPECT().IsUnlocked().Return(false)
	s.pool.EXPECT().IsPrimary("0").Return(isPrimary, nil)
	s.agent.EXPECT().CurrentConfig().Return(s.agentCfg)
	s.agentCfg.EXPECT().UpgradedToVersion().Return(fromVersion)
	s.pool.EXPECT().EnsureUpgradeInfo("0", fromVersion, jujuversion.Current).Return(s.upgradeInfo, nil)
}

// expectExecution simply executes the mutator passed to ChangeConfig.
// In this case it is worker.runUpgradeSteps.
func (s *workerSuite) expectExecution() {
	s.agent.EXPECT().ChangeConfig(gomock.Any()).DoAndReturn(func(f agent.ConfigMutator) error {
		return f(s.cfgSetter)
	})
}
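
// A minimal sketch (not exercised by this suite) of how expectExecution can be
// combined with an expectation on the mock ConfigSetter when a test wants to
// observe the config mutation performed by the upgrade steps. The
// SetUpgradedToVersion call is an assumption about the agent.ConfigSetter
// interface rather than something this suite verifies:
//
//	s.expectExecution()
//	s.cfgSetter.EXPECT().SetUpgradedToVersion(jujuversion.Current)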