github.com/juju/juju@v0.0.0-20240327075706-a90865de2538/worker/dbaccessor/worker_test.go

// Copyright 2022 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package dbaccessor

import (
	"context"
	"errors"
	"time"

	jc "github.com/juju/testing/checkers"
	"github.com/juju/worker/v3"
	"github.com/juju/worker/v3/dependency"
	"github.com/juju/worker/v3/workertest"
	"go.uber.org/mock/gomock"
	gc "gopkg.in/check.v1"

	"github.com/juju/juju/database/app"
	"github.com/juju/juju/database/dqlite"
	"github.com/juju/juju/pubsub/apiserver"
	"github.com/juju/juju/testing"
)

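// workerSuite exercises the dbWorker lifecycle - startup, cluster
// reconfiguration and shutdown - against mocked node-manager, Dqlite app
// and pub/sub hub dependencies.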
type workerSuite struct {
	baseSuite

	nodeManager *MockNodeManager
}

var _ = gc.Suite(&workerSuite{})

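// TestStartupTimeoutSingleControllerReconfigure covers the case where the
// Dqlite app times out waiting to become ready and the API details report
// this controller as the only server: the worker reconfigures the cluster
// to the local node and bounces with dependency.ErrBounce.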
func (s *workerSuite) TestStartupTimeoutSingleControllerReconfigure(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(c.MkDir(), nil)
	mgrExp.IsExistingNode().Return(true, nil).Times(2)
	mgrExp.IsLoopbackBound(gomock.Any()).Return(false, nil).Times(3)
	mgrExp.IsLoopbackPreferred().Return(false).Times(2)
	mgrExp.WithTLSOption().Return(nil, nil)
	mgrExp.WithLogFuncOption().Return(nil)
	mgrExp.WithTracingOption().Return(nil)
	mgrExp.SetClusterToLocalNode(gomock.Any()).Return(nil)

	// App gets started, we time out waiting, then we close it.
	appExp := s.dbApp.EXPECT()
	appExp.Ready(gomock.Any()).Return(context.DeadlineExceeded)
	appExp.Close().Return(nil)

	// We expect to request API details.
	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)
	s.hub.EXPECT().Publish(apiserver.DetailsRequestTopic, gomock.Any()).Return(func() {}, nil)

	w := s.newWorker(c)
	defer w.Kill()

	// Topology is just us. We should reconfigure the node and shut down.
	select {
	case w.(*dbWorker).apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{"0": {ID: "0", InternalAddress: "10.6.6.6:1234"}},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	err := workertest.CheckKilled(c, w)
	c.Assert(errors.Is(err, dependency.ErrBounce), jc.IsTrue)
}

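// TestStartupTimeoutMultipleControllerRetry covers the case where the
// Dqlite app times out waiting to become ready and the API details then
// report multiple servers: the worker retries startup, and the node
// remains unstarted until usable details arrive.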
func (s *workerSuite) TestStartupTimeoutMultipleControllerRetry(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(c.MkDir(), nil).Times(2)
	mgrExp.IsExistingNode().Return(true, nil).Times(2)
	mgrExp.IsLoopbackBound(gomock.Any()).Return(false, nil).Times(4)

	// We expect 1 attempt to start and 2 attempts to reconfigure.
	mgrExp.IsLoopbackPreferred().Return(false).Times(3)

	// We expect 2 attempts to start.
	mgrExp.WithTLSOption().Return(nil, nil).Times(2)
	mgrExp.WithLogFuncOption().Return(nil).Times(2)
	mgrExp.WithTracingOption().Return(nil).Times(2)

	// App gets started, we time out waiting, then we close it both times.
	appExp := s.dbApp.EXPECT()
	appExp.Ready(gomock.Any()).Return(context.DeadlineExceeded).Times(2)
	appExp.Close().Return(nil).Times(2)

	// We expect to request API details.
	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)
	s.hub.EXPECT().Publish(apiserver.DetailsRequestTopic, gomock.Any()).Return(func() {}, nil).Times(2)

	w := s.newWorker(c)
	defer workertest.CleanKill(c, w)
	dbw := w.(*dbWorker)

	// If there are multiple servers reported, we can't reason about our
	// current state in a discrete fashion. The startup attempt fails and
	// the worker retries.
	select {
	case dbw.apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0", InternalAddress: "10.6.6.6:1234"},
			"1": {ID: "1", InternalAddress: "10.6.6.7:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	// At this point, the Dqlite node is not started.
	// The worker is waiting for legitimate server detail messages.
	select {
	case <-dbw.dbReady:
		c.Fatal("Dqlite node should not be started yet.")
	case <-time.After(testing.ShortWait):
	}
}

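// TestStartupNotExistingNodeThenCluster covers a brand-new node, which
// must wait for API details that include both its own internal address and
// at least one other cluster member before starting Dqlite with the
// address and cluster options.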
func (s *workerSuite) TestStartupNotExistingNodeThenCluster(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(c.MkDir(), nil)
	mgrExp.IsExistingNode().Return(false, nil).Times(4)
	mgrExp.WithAddressOption("10.6.6.6").Return(nil)
	mgrExp.WithClusterOption([]string{"10.6.6.7"}).Return(nil)
	mgrExp.WithLogFuncOption().Return(nil)
	mgrExp.WithTLSOption().Return(nil, nil)
	mgrExp.WithTracingOption().Return(nil)
	mgrExp.IsLoopbackBound(gomock.Any()).Return(false, nil)

	// Expects 1 attempt to start and 2 attempts to reconfigure.
	mgrExp.IsLoopbackPreferred().Return(false).Times(3)

	s.client.EXPECT().Cluster(gomock.Any()).Return(nil, nil)

	s.expectNodeStartupAndShutdown()
	s.dbApp.EXPECT().Handover(gomock.Any()).Return(nil)

	// When we are starting up as a new node,
	// we request details immediately.
	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)
	s.hub.EXPECT().Publish(apiserver.DetailsRequestTopic, gomock.Any()).Return(func() {}, nil)

	w := s.newWorker(c)
	defer workertest.DirtyKill(c, w)
	dbw := w.(*dbWorker)

	// Without a bind address for ourselves we keep waiting.
	select {
	case dbw.apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0"},
			"1": {ID: "1", InternalAddress: "10.6.6.7:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	// Without other cluster members we keep waiting.
	select {
	case w.(*dbWorker).apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0", InternalAddress: "10.6.6.6:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	// At this point, the Dqlite node is not started.
	// The worker is waiting for legitimate server detail messages.
	select {
	case <-dbw.dbReady:
		c.Fatal("Dqlite node should not be started yet.")
	case <-time.After(testing.ShortWait):
	}

	// Push a message onto the API details channel,
	// enabling node startup as a cluster member.
	select {
	case w.(*dbWorker).apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0", InternalAddress: "10.6.6.6:1234"},
			"1": {ID: "1", InternalAddress: "10.6.6.7:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	ensureStartup(c, dbw)

	s.client.EXPECT().Leader(gomock.Any()).Return(&dqlite.NodeInfo{
		ID:      1,
		Address: "10.10.1.1",
	}, nil)
	report := w.(interface{ Report() map[string]any }).Report()
	c.Assert(report, MapHasKeys, []string{
		"leader",
		"leader-id",
		"leader-role",
	})

	workertest.CleanKill(c, w)
}

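// TestWorkerStartupExistingNode covers an existing node that is no longer
// loopback-bound: it starts directly with the TLS option and hands over
// on shutdown.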
func (s *workerSuite) TestWorkerStartupExistingNode(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(c.MkDir(), nil)

	// If this is an existing node, we do not invoke the address or cluster
	// options, but since the node is no longer in its as-bootstrapped
	// (loopback-bound) state, we assume it is part of a cluster and use
	// the TLS option.
	// IsLoopbackBound is called twice - once to check the startup
	// conditions and then again upon worker shutdown.
	mgrExp.IsExistingNode().Return(true, nil)
	mgrExp.IsLoopbackBound(gomock.Any()).Return(false, nil).Times(2)
	mgrExp.IsLoopbackPreferred().Return(false)
	mgrExp.WithLogFuncOption().Return(nil)
	mgrExp.WithTLSOption().Return(nil, nil)
	mgrExp.WithTracingOption().Return(nil)

	s.client.EXPECT().Cluster(gomock.Any()).Return(nil, nil)

	s.expectNodeStartupAndShutdown()
	s.dbApp.EXPECT().Handover(gomock.Any()).Return(nil)

	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)

	w := s.newWorker(c)
	defer workertest.DirtyKill(c, w)

	ensureStartup(c, w.(*dbWorker))

	workertest.CleanKill(c, w)
}

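// TestWorkerStartupExistingNodeWithLoopbackPreferred covers an existing
// node that is still loopback-bound: it starts without the TLS option and
// shuts down without a handover.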
func (s *workerSuite) TestWorkerStartupExistingNodeWithLoopbackPreferred(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(c.MkDir(), nil)

	// If this is an existing node, we do not invoke the address or cluster
	// options. Since the node is still in its as-bootstrapped
	// (loopback-bound) state, we do not use the TLS option.
	// IsLoopbackBound is called twice - once to check the startup
	// conditions and then again upon worker shutdown.
	mgrExp.IsExistingNode().Return(true, nil)
	mgrExp.IsLoopbackBound(gomock.Any()).Return(true, nil).Times(2)
	mgrExp.IsLoopbackPreferred().Return(false)
	mgrExp.WithLogFuncOption().Return(nil)
	mgrExp.WithTracingOption().Return(nil)

	s.client.EXPECT().Cluster(gomock.Any()).Return(nil, nil)

	s.expectNodeStartupAndShutdown()

	// We don't expect a handover, because we're not rebinding.

	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)

	w := s.newWorker(c)
	defer workertest.DirtyKill(c, w)

	ensureStartup(c, w.(*dbWorker))

	workertest.CleanKill(c, w)
}

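// TestWorkerStartupAsBootstrapNodeSingleServerNoRebind covers a
// loopback-bound bootstrap node that sees no topology requiring a rebind:
// neither a single server, nor multiple servers lacking an internal
// address for this node, causes a binding change.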
func (s *workerSuite) TestWorkerStartupAsBootstrapNodeSingleServerNoRebind(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	dataDir := c.MkDir()
	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(dataDir, nil).MinTimes(1)

	// If this is an existing node, we do not
	// invoke the address or cluster options.
	mgrExp.IsExistingNode().Return(true, nil).Times(3)
	mgrExp.IsLoopbackBound(gomock.Any()).Return(true, nil).Times(4)
	mgrExp.IsLoopbackPreferred().Return(false).Times(3)
	mgrExp.WithLogFuncOption().Return(nil)
	mgrExp.WithTracingOption().Return(nil)

	s.client.EXPECT().Cluster(gomock.Any()).Return(nil, nil)

	s.expectNodeStartupAndShutdown()

	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)

	w := s.newWorker(c)
	defer workertest.DirtyKill(c, w)
	dbw := w.(*dbWorker)

	ensureStartup(c, dbw)

	// At this point we have started successfully.
	// Push a message onto the API details channel.
	// A single server does not cause a binding change.
	select {
	case dbw.apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0", InternalAddress: "10.6.6.6:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	// Multiple servers still do not cause a binding change
	// if there is no internal address to bind to.
	select {
	case dbw.apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0"},
			"1": {ID: "1", InternalAddress: "10.6.6.7:1234"},
			"2": {ID: "2", InternalAddress: "10.6.6.8:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	workertest.CleanKill(c, w)
}

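// TestWorkerStartupAsBootstrapNodeThenReconfigure covers a loopback-bound
// bootstrap node moving into HA: once multiple controllers with internal
// addresses are reported, the cluster and node info are rewritten from the
// loopback address to the node's internal address, and the worker bounces
// with dependency.ErrBounce.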
func (s *workerSuite) TestWorkerStartupAsBootstrapNodeThenReconfigure(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	dataDir := c.MkDir()
	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(dataDir, nil).MinTimes(1)

	// If this is an existing node, we do not
	// invoke the address or cluster options.
	mgrExp.IsExistingNode().Return(true, nil).Times(2)
	mgrExp.IsLoopbackPreferred().Return(false).Times(2)
	gomock.InOrder(
		mgrExp.IsLoopbackBound(gomock.Any()).Return(true, nil).Times(2),
		// This is the check at shutdown.
		mgrExp.IsLoopbackBound(gomock.Any()).Return(false, nil))
	mgrExp.WithLogFuncOption().Return(nil)
	mgrExp.WithTracingOption().Return(nil)

	// These are the expectations around reconfiguring
	// the cluster and local node.
	mgrExp.ClusterServers(gomock.Any()).Return([]dqlite.NodeInfo{
		{
			ID:      3297041220608546238,
			Address: "127.0.0.1:17666",
			Role:    0,
		},
	}, nil)
	mgrExp.SetClusterServers(gomock.Any(), []dqlite.NodeInfo{
		{
			ID:      3297041220608546238,
			Address: "10.6.6.6:17666",
			Role:    0,
		},
	}).Return(nil)
	mgrExp.SetNodeInfo(dqlite.NodeInfo{
		ID:      3297041220608546238,
		Address: "10.6.6.6:17666",
		Role:    0,
	}).Return(nil)

	s.client.EXPECT().Cluster(gomock.Any()).Return(nil, nil)

	// Although the shutdown check for IsLoopbackBound returns false,
	// this shutdown call actually runs before the node is reconfigured.
	// By the time the loop exits, the node is already set to nil.
	s.expectNodeStartupAndShutdown()

	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)

	w := s.newWorker(c)
	defer workertest.DirtyKill(c, w)
	dbw := w.(*dbWorker)

	ensureStartup(c, dbw)

	// At this point we have started successfully.
	// Push a message onto the API details channel to simulate a move into HA.
	select {
	case dbw.apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0", InternalAddress: "10.6.6.6:1234"},
			"1": {ID: "1", InternalAddress: "10.6.6.7:1234"},
			"2": {ID: "2", InternalAddress: "10.6.6.8:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	err := workertest.CheckKilled(c, w)
	c.Assert(errors.Is(err, dependency.ErrBounce), jc.IsTrue)
}

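// TestWorkerStartupAsBootstrapNodeThenReconfigureWithLoopbackPreferred
// covers a node that prefers the loopback binding: as the only reported
// server it keeps that binding and shuts down cleanly.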
func (s *workerSuite) TestWorkerStartupAsBootstrapNodeThenReconfigureWithLoopbackPreferred(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	dataDir := c.MkDir()
	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(dataDir, nil).MinTimes(1)
	mgrExp.WithLogFuncOption().Return(nil)
	mgrExp.WithTracingOption().Return(nil)

	// If this is a loopback preferred node, we do not invoke the TLS or
	// cluster options.
	mgrExp.IsExistingNode().Return(true, nil).Times(2)
	mgrExp.IsLoopbackPreferred().Return(true).Times(2)
	mgrExp.IsLoopbackBound(gomock.Any()).Return(true, nil).Times(2)

	// Ensure that we expect a clean startup and shutdown.
	s.expectNodeStartupAndShutdown()

	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)

	s.client.EXPECT().Cluster(gomock.Any()).Return(nil, nil)

	w := s.newWorker(c)
	defer workertest.DirtyKill(c, w)
	dbw := w.(*dbWorker)

	ensureStartup(c, dbw)

	// At this point we have started successfully.
	// Push a message onto the API details channel to simulate changes.
	select {
	case dbw.apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0", InternalAddress: "127.0.0.1:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	// We do want a clean kill here, because we're not rebinding.
	workertest.CleanKill(c, w)
}

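// TestWorkerStartupAsBootstrapNodeThenReconfigureWithLoopbackPreferredAndNotLoopbackBound
// covers a node that prefers loopback but is not yet bound to it: on
// receiving API details, the worker bounces with dependency.ErrBounce so
// the node can come back up bound to the loopback address.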
func (s *workerSuite) TestWorkerStartupAsBootstrapNodeThenReconfigureWithLoopbackPreferredAndNotLoopbackBound(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.expectAnyLogs()
	s.expectClock()
	s.expectTrackedDBKill()

	dataDir := c.MkDir()
	mgrExp := s.nodeManager.EXPECT()
	mgrExp.EnsureDataDir().Return(dataDir, nil).MinTimes(1)
	mgrExp.WithLogFuncOption().Return(nil)
	mgrExp.WithTracingOption().Return(nil)

	// If this is a loopback preferred node, we do not invoke the TLS or
	// cluster options.
	mgrExp.IsExistingNode().Return(true, nil).Times(2)
	mgrExp.IsLoopbackPreferred().Return(true).Times(2)
	gomock.InOrder(
		mgrExp.IsLoopbackBound(gomock.Any()).Return(false, nil),
		mgrExp.IsLoopbackBound(gomock.Any()).Return(true, nil),
	)

	// Ensure that we expect a clean startup and shutdown.
	s.expectNodeStartupAndShutdown()

	s.hub.EXPECT().Subscribe(apiserver.DetailsTopic, gomock.Any()).Return(func() {}, nil)

	s.client.EXPECT().Cluster(gomock.Any()).Return(nil, nil)

	w := s.newWorker(c)
	defer workertest.DirtyKill(c, w)
	dbw := w.(*dbWorker)

	ensureStartup(c, dbw)

	// At this point we have started successfully.
	// Push a message onto the API details channel to simulate changes.
	select {
	case dbw.apiServerChanges <- apiserver.Details{
		Servers: map[string]apiserver.APIServer{
			"0": {ID: "0", InternalAddress: "127.0.0.1:1234"},
		},
	}:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for cluster change to be processed")
	}

	err := workertest.CheckKilled(c, w)
	c.Assert(errors.Is(err, dependency.ErrBounce), jc.IsTrue)
}

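// setupMocks extends the base suite's mocks with a NodeManager mock.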
func (s *workerSuite) setupMocks(c *gc.C) *gomock.Controller {
	ctrl := s.baseSuite.setupMocks(c)
	s.nodeManager = NewMockNodeManager(ctrl)
	return ctrl
}

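// newWorker constructs a dbWorker wired entirely to the suite's mocks:
// NewApp and NewDBWorker return the mock Dqlite app and tracked DB rather
// than building real ones.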
func (s *workerSuite) newWorker(c *gc.C) worker.Worker {
	cfg := WorkerConfig{
		NodeManager:  s.nodeManager,
		Clock:        s.clock,
		Hub:          s.hub,
		ControllerID: "0",
		Logger:       s.logger,
		NewApp: func(string, ...app.Option) (DBApp, error) {
			return s.dbApp, nil
		},
		NewDBWorker: func(context.Context, DBApp, string, ...TrackedDBWorkerOption) (TrackedDB, error) {
			return s.trackedDB, nil
		},
		MetricsCollector: &Collector{},
	}

	w, err := newWorker(cfg)
	c.Assert(err, jc.ErrorIsNil)
	return w
}