github.com/juju/juju@v0.0.0-20240430160146-1752b71fcf00/worker/caasunitprovisioner/worker_test.go (about)

     1  // Copyright 2017 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package caasunitprovisioner_test
     5  
     6  import (
     7  	"time"
     8  
     9  	"github.com/juju/charm/v12"
    10  	"github.com/juju/clock"
    11  	"github.com/juju/errors"
    12  	"github.com/juju/loggo"
    13  	"github.com/juju/names/v5"
    14  	"github.com/juju/retry"
    15  	"github.com/juju/testing"
    16  	jc "github.com/juju/testing/checkers"
    17  	"github.com/juju/worker/v3"
    18  	"github.com/juju/worker/v3/workertest"
    19  	"go.uber.org/mock/gomock"
    20  	gc "gopkg.in/check.v1"
    21  
    22  	"github.com/juju/juju/api/common/charms"
    23  	apicaasunitprovisioner "github.com/juju/juju/api/controller/caasunitprovisioner"
    24  	"github.com/juju/juju/caas"
    25  	"github.com/juju/juju/caas/specs"
    26  	"github.com/juju/juju/core/config"
    27  	"github.com/juju/juju/core/constraints"
    28  	"github.com/juju/juju/core/life"
    29  	"github.com/juju/juju/core/network"
    30  	"github.com/juju/juju/core/status"
    31  	"github.com/juju/juju/core/watcher/watchertest"
    32  	"github.com/juju/juju/rpc/params"
    33  	"github.com/juju/juju/storage"
    34  	coretesting "github.com/juju/juju/testing"
    35  	"github.com/juju/juju/worker/caasunitprovisioner"
    36  )
    37  
// WorkerSuite exercises the caasunitprovisioner worker against a set of
// mock dependencies. Tests drive the worker by sending events on the
// *Changes channels and observe its side effects via the notification
// channels (serviceDeleted, serviceEnsured, serviceUpdated,
// resourcesCleared).
type WorkerSuite struct {
	testing.IsolationSuite

	// config is the worker configuration under test; presumably it is
	// assembled from the mocks below in setupMocks (not visible here).
	config             caasunitprovisioner.Config
	applicationGetter  mockApplicationGetter
	applicationUpdater mockApplicationUpdater
	serviceBroker      mockServiceBroker
	containerBroker    mockContainerBroker
	podSpecGetter      mockProvisioningInfoGetterGetter
	lifeGetter         mockLifeGetter
	charmGetter        mockCharmGetter
	unitUpdater        mockUnitUpdater
	statusSetter       *caasunitprovisioner.MockProvisioningStatusSetter

	// Event channels feeding the mock watchers...
	applicationChanges      chan []string
	applicationScaleChanges chan struct{}
	caasUnitsChanges        chan struct{}
	caasServiceChanges      chan struct{}
	caasOperatorChanges     chan struct{}
	containerSpecChanges    chan struct{}
	// ...and channels signalled by the mocks when the worker performs
	// the corresponding operation.
	serviceDeleted          chan struct{}
	serviceEnsured          chan struct{}
	serviceUpdated          chan struct{}
	resourcesCleared        chan struct{}
	appChanges              chan struct{}
}
    64  
// Register the suite with gocheck.
var _ = gc.Suite(&WorkerSuite{})

var (
	// containerSpec is the YAML pod spec fed to the worker via the mock
	// provisioning info getter; getParsedSpec returns the equivalent
	// parsed structure for assertions. The [1:] slice strips the
	// leading newline of the raw literal.
	containerSpec = `
containers:
  - name: gitlab
    image: gitlab/latest
    ports:
    - containerPort: 80
      protocol: TCP
    - containerPort: 443
    config:
      attr: foo=bar; fred=blogs
      foo: bar
`[1:]
)
    81  
    82  func getParsedSpec() *specs.PodSpec {
    83  	parsedSpec := &specs.PodSpec{}
    84  	parsedSpec.Version = specs.CurrentVersion
    85  	parsedSpec.Containers = []specs.ContainerSpec{
    86  		{
    87  			Name:  "gitlab",
    88  			Image: "gitlab/latest",
    89  			Ports: []specs.ContainerPort{
    90  				{ContainerPort: 80, Protocol: "TCP"},
    91  				{ContainerPort: 443},
    92  			},
    93  			EnvConfig: map[string]interface{}{
    94  				"attr": "foo=bar; fred=blogs",
    95  				"foo":  "bar",
    96  			},
    97  		},
    98  	}
    99  	return parsedSpec
   100  }
   101  
   102  func getExpectedServiceParams() *caas.ServiceParams {
   103  	parsedSpec := getParsedSpec()
   104  	return &caas.ServiceParams{
   105  		PodSpec:      parsedSpec,
   106  		ResourceTags: map[string]string{"foo": "bar"},
   107  		Constraints:  constraints.MustParse("mem=4G"),
   108  		Deployment: caas.DeploymentParams{
   109  			DeploymentType: caas.DeploymentStateful,
   110  			ServiceType:    caas.ServiceLoadBalancer,
   111  		},
   112  		Filesystems: []storage.KubernetesFilesystemParams{{
   113  			StorageName: "database",
   114  			Size:        100,
   115  		}},
   116  	}
   117  }
   118  
// SetUpTest creates fresh event channels and rebuilds every mock
// dependency before each test so that no state leaks between tests.
func (s *WorkerSuite) SetUpTest(c *gc.C) {
	s.IsolationSuite.SetUpTest(c)

	s.applicationChanges = make(chan []string)
	s.applicationScaleChanges = make(chan struct{})
	s.caasUnitsChanges = make(chan struct{})
	s.caasServiceChanges = make(chan struct{})
	s.caasOperatorChanges = make(chan struct{})
	// Buffered so a spec change can be queued without a ready receiver.
	s.containerSpecChanges = make(chan struct{}, 1)
	s.serviceDeleted = make(chan struct{})
	s.serviceEnsured = make(chan struct{})
	s.serviceUpdated = make(chan struct{})
	s.resourcesCleared = make(chan struct{})
	s.appChanges = make(chan struct{})

	s.applicationGetter = mockApplicationGetter{
		watcher:        watchertest.NewMockStringsWatcher(s.applicationChanges),
		appWatcher:     watchertest.NewMockNotifyWatcher(s.appChanges),
		scaleWatcher:   watchertest.NewMockNotifyWatcher(s.applicationScaleChanges),
		deploymentMode: caas.ModeWorkload,
	}
	s.applicationUpdater = mockApplicationUpdater{
		updated: s.serviceUpdated,
		cleared: s.resourcesCleared,
	}

	// Default provisioning info matching containerSpec and
	// getExpectedServiceParams.
	s.podSpecGetter = mockProvisioningInfoGetterGetter{
		watcher: watchertest.NewMockNotifyWatcher(s.containerSpecChanges),
	}
	s.podSpecGetter.setProvisioningInfo(apicaasunitprovisioner.ProvisioningInfo{
		PodSpec:     containerSpec,
		Tags:        map[string]string{"foo": "bar"},
		Constraints: constraints.MustParse("mem=4G"),
		DeploymentInfo: apicaasunitprovisioner.DeploymentInfo{
			DeploymentType: "stateful",
			ServiceType:    "loadbalancer",
		},
		Filesystems: []storage.KubernetesFilesystemParams{{
			StorageName: "database",
			Size:        100,
		}},
	})

	s.unitUpdater = mockUnitUpdater{}

	// The container broker reports a single stateful unit with attached
	// filesystem/volume info; tests assert this is relayed to the
	// unit updater.
	s.containerBroker = mockContainerBroker{
		unitsWatcher:    watchertest.NewMockNotifyWatcher(s.caasUnitsChanges),
		operatorWatcher: watchertest.NewMockNotifyWatcher(s.caasOperatorChanges),
		units: []caas.Unit{
			{
				Id:       "u1",
				Address:  "10.0.0.1",
				Stateful: true,
				FilesystemInfo: []caas.FilesystemInfo{
					{MountPoint: "/path-to-here", ReadOnly: true, StorageName: "database",
						Size: 100, FilesystemId: "fs-id",
						Status: status.StatusInfo{Status: status.Attaching, Message: "not ready"},
						Volume: caas.VolumeInfo{VolumeId: "vol-id", Size: 200, Persistent: true,
							Status: status.StatusInfo{Status: status.Error, Message: "vol not ready"}},
					},
				},
			},
		},
	}
	s.lifeGetter = mockLifeGetter{}
	s.lifeGetter.setLife(life.Alive)
	s.charmGetter = mockCharmGetter{
		charmInfo: &charms.CharmInfo{
			Meta:     &charm.Meta{},
			Manifest: &charm.Manifest{},
		},
	}
	s.serviceBroker = mockServiceBroker{
		ensured:        s.serviceEnsured,
		deleted:        s.serviceDeleted,
		serviceWatcher: watchertest.NewMockNotifyWatcher(s.caasServiceChanges),
	}
}
   197  
   198  func (s *WorkerSuite) sendContainerSpecChange(c *gc.C) {
   199  	select {
   200  	case s.containerSpecChanges <- struct{}{}:
   201  	case <-time.After(coretesting.LongWait):
   202  		c.Fatal("timed out sending pod spec change")
   203  	}
   204  }
   205  
   206  func (s *WorkerSuite) TestValidateConfig(c *gc.C) {
   207  	defer s.setupMocks(c).Finish()
   208  
   209  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   210  		config.ApplicationGetter = nil
   211  	}, `missing ApplicationGetter not valid`)
   212  
   213  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   214  		config.ApplicationUpdater = nil
   215  	}, `missing ApplicationUpdater not valid`)
   216  
   217  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   218  		config.ServiceBroker = nil
   219  	}, `missing ServiceBroker not valid`)
   220  
   221  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   222  		config.ContainerBroker = nil
   223  	}, `missing ContainerBroker not valid`)
   224  
   225  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   226  		config.ProvisioningInfoGetter = nil
   227  	}, `missing ProvisioningInfoGetter not valid`)
   228  
   229  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   230  		config.LifeGetter = nil
   231  	}, `missing LifeGetter not valid`)
   232  
   233  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   234  		config.UnitUpdater = nil
   235  	}, `missing UnitUpdater not valid`)
   236  
   237  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   238  		config.ProvisioningStatusSetter = nil
   239  	}, `missing ProvisioningStatusSetter not valid`)
   240  
   241  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   242  		config.CharmGetter = nil
   243  	}, `missing CharmGetter not valid`)
   244  
   245  	s.testValidateConfig(c, func(config *caasunitprovisioner.Config) {
   246  		config.Logger = nil
   247  	}, `missing Logger not valid`)
   248  }
   249  
   250  func (s *WorkerSuite) testValidateConfig(c *gc.C, f func(*caasunitprovisioner.Config), expect string) {
   251  	config := s.config
   252  	f(&config)
   253  	w, err := caasunitprovisioner.NewWorker(config)
   254  	if err == nil {
   255  		workertest.DirtyKill(c, w)
   256  	}
   257  	c.Check(err, gc.ErrorMatches, expect)
   258  }
   259  
// TestStartStop verifies a worker built from a valid config starts,
// stays alive, and shuts down cleanly.
func (s *WorkerSuite) TestStartStop(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w, err := caasunitprovisioner.NewWorker(s.config)
	c.Assert(err, jc.ErrorIsNil)
	workertest.CheckAlive(c, w)
	workertest.CleanKill(c, w)
}
   268  
// setupNewUnitScenario starts the worker and drives it through the
// standard provisioning sequence for application "gitlab": an
// application change, a scale change to 1, a spec change with no spec
// available yet (no-op), then a spec change that results in the service
// being ensured and updated. It returns the running worker; the caller
// is responsible for killing it.
func (s *WorkerSuite) setupNewUnitScenario(c *gc.C) worker.Worker {
	s.statusSetter.EXPECT().SetOperatorStatus(
		"gitlab", status.Waiting, "ensuring", map[string]interface{}{"foo": "bar"}).MinTimes(1)

	w, err := caasunitprovisioner.NewWorker(s.config)
	c.Assert(err, jc.ErrorIsNil)

	// First ProvisioningInfo call succeeds; the second returns not-found.
	s.podSpecGetter.SetErrors(nil, errors.NotFoundf("spec"))

	s.sendApplicationChanges(c, "gitlab")

	s.applicationGetter.scale = 1
	select {
	case s.applicationScaleChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending scale change")
	}

	// We seed a "not found" error above to indicate that
	// there is not yet a pod spec; the broker should
	// not be invoked.
	s.sendContainerSpecChange(c)
	select {
	case <-s.serviceEnsured:
		c.Fatal("service ensured unexpectedly")
	case <-time.After(coretesting.ShortWait):
	}

	// Now the spec is available again: the service must be ensured.
	s.sendContainerSpecChange(c)
	s.podSpecGetter.assertSpecRetrieved(c)
	select {
	case <-s.serviceEnsured:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be ensured")
	}

	select {
	case <-s.serviceUpdated:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be updated")
	}
	return w
}
   312  
// TestScaleChangedInJuju verifies that scale changes originating from
// Juju (via the application scale watcher) result in EnsureService calls
// with the new replica count, both when scaling up and scaling down.
func (s *WorkerSuite) TestScaleChangedInJuju(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w := s.setupNewUnitScenario(c)
	defer workertest.CleanKill(c, w)

	// The scenario above should have produced exactly this set of
	// application getter calls (order not significant).
	var appCallNames []string
	for _, call := range s.applicationGetter.Calls() {
		appCallNames = append(appCallNames, call.FuncName)
	}
	c.Check(appCallNames, jc.SameContents, []string{"WatchApplications", "DeploymentMode", "WatchApplicationScale", "WatchApplication", "ApplicationScale", "ApplicationConfig"})

	s.podSpecGetter.CheckCallNames(c, "WatchPodSpec", "ProvisioningInfo", "ProvisioningInfo")
	s.podSpecGetter.CheckCall(c, 0, "WatchPodSpec", "gitlab")
	s.podSpecGetter.CheckCall(c, 1, "ProvisioningInfo", "gitlab") // not found
	s.podSpecGetter.CheckCall(c, 2, "ProvisioningInfo", "gitlab")
	s.lifeGetter.CheckCallNames(c, "Life")
	s.lifeGetter.CheckCall(c, 0, "Life", "gitlab")
	s.serviceBroker.CheckCallNames(c, "WatchService", "EnsureService", "GetService")
	s.serviceBroker.CheckCall(c, 1, "EnsureService",
		"gitlab", getExpectedServiceParams(), 1, config.ConfigAttributes{"juju-external-hostname": "exthost"})
	s.serviceBroker.CheckCall(c, 2, "GetService", "gitlab", caas.ModeWorkload)

	s.serviceBroker.ResetCalls()
	// Add another unit: scale 1 -> 2.
	s.applicationGetter.scale = 2
	select {
	case s.applicationScaleChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending scale change")
	}

	select {
	case <-s.serviceEnsured:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be ensured")
	}

	newExpectedParams := getExpectedServiceParams()
	s.serviceBroker.CheckCallNames(c, "EnsureService")
	s.serviceBroker.CheckCall(c, 0, "EnsureService",
		"gitlab", newExpectedParams, 2, config.ConfigAttributes{"juju-external-hostname": "exthost"})

	s.serviceBroker.ResetCalls()
	// Delete a unit: scale 2 -> 1.
	s.applicationGetter.scale = 1
	select {
	case s.applicationScaleChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending scale change")
	}

	select {
	case <-s.serviceEnsured:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be ensured")
	}

	s.serviceBroker.CheckCallNames(c, "EnsureService")
	s.serviceBroker.CheckCall(c, 0, "EnsureService",
		"gitlab", newExpectedParams, 1, config.ConfigAttributes{"juju-external-hostname": "exthost"})
}
   375  
   376  func intPtr(i int) *int {
   377  	return &i
   378  }
   379  
// TestScaleChangedInCluster verifies that a service change observed in
// the cluster causes the worker to query the broker, push the observed
// service details (provider id, addresses, scale) back to Juju via the
// application updater, and reconcile unit information via the unit
// updater.
func (s *WorkerSuite) TestScaleChangedInCluster(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w := s.setupNewUnitScenario(c)
	defer workertest.CleanKill(c, w)

	s.containerBroker.ResetCalls()
	s.applicationUpdater.ResetCalls()
	s.serviceBroker.ResetCalls()
	s.serviceBroker.serviceStatus = status.StatusInfo{
		Status:  status.Active,
		Message: "working",
	}

	s.unitUpdater.unitsInfo = &params.UpdateApplicationUnitsInfo{
		Units: []params.ApplicationUnitInfo{
			{ProviderId: "u1", UnitTag: "unit-gitlab-0"},
		},
	}

	select {
	case s.caasServiceChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending service change")
	}

	// The worker reacts asynchronously, so poll until the broker has
	// been called.
	retryCallArgs := coretesting.LongRetryStrategy
	retryCallArgs.Func = func() error {
		if len(s.serviceBroker.Calls()) > 0 {
			return nil
		}
		return errors.Errorf("Not enough calls yet")
	}
	err := retry.Call(retryCallArgs)
	c.Assert(err, jc.ErrorIsNil)

	s.serviceBroker.CheckCallNames(c, "GetService")
	c.Assert(s.serviceBroker.Calls()[0].Args, jc.DeepEquals, []interface{}{"gitlab", caas.ModeWorkload})

	select {
	case <-s.serviceUpdated:
		// ProviderId "id" and Scale 4 presumably come from the mock
		// service broker's GetService result (defined elsewhere in this
		// test package).
		s.applicationUpdater.CheckCallNames(c, "UpdateApplicationService")
		c.Assert(s.applicationUpdater.Calls()[0].Args, jc.DeepEquals, []interface{}{
			params.UpdateApplicationServiceArg{
				ApplicationTag: names.NewApplicationTag("gitlab").String(),
				ProviderId:     "id",
				Addresses:      params.FromProviderAddresses(network.NewMachineAddresses([]string{"10.0.0.1"}).AsProviderAddresses()...),
				Scale:          intPtr(4),
			},
		})
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be updated")
	}

	// Wait until the container broker has both listed units and
	// annotated the matched unit.
	retryCallArgs.Func = func() error {
		if len(s.containerBroker.Calls()) >= 2 {
			return nil
		}
		return errors.Errorf("Not enough calls yet")
	}
	err = retry.Call(retryCallArgs)
	c.Assert(err, jc.ErrorIsNil)
	if !s.containerBroker.CheckCallNames(c, "Units", "AnnotateUnit") {
		return
	}
	c.Assert(s.containerBroker.Calls()[0].Args, jc.DeepEquals, []interface{}{"gitlab", caas.ModeWorkload})
	c.Assert(s.containerBroker.Calls()[1].Args, jc.DeepEquals, []interface{}{"gitlab", caas.ModeWorkload, "u1", names.NewUnitTag("gitlab/0")})

	retryCallArgs.Func = func() error {
		if len(s.unitUpdater.Calls()) > 0 {
			return nil
		}
		return errors.Errorf("Not enough calls yet")
	}
	err = retry.Call(retryCallArgs)
	c.Assert(err, jc.ErrorIsNil)

	// The unit updater should receive the unit details configured on
	// the mock container broker in SetUpTest.
	s.unitUpdater.CheckCallNames(c, "UpdateUnits")
	scale := 4
	c.Assert(s.unitUpdater.Calls()[0].Args, jc.DeepEquals, []interface{}{
		params.UpdateApplicationUnits{
			ApplicationTag: names.NewApplicationTag("gitlab").String(),
			Scale:          &scale,
			Status: params.EntityStatus{
				Status: status.Active,
				Info:   "working",
			},
			Units: []params.ApplicationUnitParams{
				{ProviderId: "u1", Address: "10.0.0.1", Ports: []string(nil),
					Stateful: true,
					FilesystemInfo: []params.KubernetesFilesystemInfo{
						{StorageName: "database", MountPoint: "/path-to-here", ReadOnly: true,
							FilesystemId: "fs-id", Size: 100, Pool: "",
							Volume: params.KubernetesVolumeInfo{
								VolumeId: "vol-id", Size: 200,
								Persistent: true, Status: "error", Info: "vol not ready"},
							Status: "attaching", Info: "not ready"},
					}},
			},
		},
	})
}
   482  
// TestNewPodSpecChange verifies that re-sending an unchanged pod spec is
// a no-op, while a genuinely different spec triggers EnsureService with
// the newly parsed parameters.
func (s *WorkerSuite) TestNewPodSpecChange(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w := s.setupNewUnitScenario(c)
	defer workertest.CleanKill(c, w)

	s.serviceBroker.ResetCalls()

	// Same spec, nothing happens.
	s.sendContainerSpecChange(c)
	s.podSpecGetter.assertSpecRetrieved(c)
	select {
	case <-s.serviceEnsured:
		c.Fatal("service/unit ensured unexpectedly")
	case <-time.After(coretesting.ShortWait):
	}

	// A reduced spec: same container, no ports or config.
	var (
		anotherSpec = `
containers:
  - name: gitlab
    image: gitlab/latest
`[1:]
	)
	anotherParsedSpec := &specs.PodSpec{}
	anotherParsedSpec.Version = specs.CurrentVersion
	anotherParsedSpec.Containers = []specs.ContainerSpec{{
		Name:  "gitlab",
		Image: "gitlab/latest",
	}}

	s.podSpecGetter.setProvisioningInfo(apicaasunitprovisioner.ProvisioningInfo{
		PodSpec:     anotherSpec,
		Tags:        map[string]string{"foo": "bar"},
		Constraints: constraints.MustParse("mem=4G"),
		DeploymentInfo: apicaasunitprovisioner.DeploymentInfo{
			DeploymentType: "stateful",
			ServiceType:    "loadbalancer",
		},
		Filesystems: []storage.KubernetesFilesystemParams{{
			StorageName: "database",
			Size:        100,
		}},
	})
	s.sendContainerSpecChange(c)
	s.podSpecGetter.assertSpecRetrieved(c)

	select {
	case <-s.serviceEnsured:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be ensured")
	}

	// EnsureService must have been called with the new parsed spec.
	expectedParams := &caas.ServiceParams{
		PodSpec:      anotherParsedSpec,
		ResourceTags: map[string]string{"foo": "bar"},
		Constraints:  constraints.MustParse("mem=4G"),
		Deployment: caas.DeploymentParams{
			DeploymentType: "stateful",
			ServiceType:    "loadbalancer",
		},
		Filesystems: []storage.KubernetesFilesystemParams{{
			StorageName: "database",
			Size:        100,
		}},
	}
	s.serviceBroker.CheckCallNames(c, "EnsureService")
	s.serviceBroker.CheckCall(c, 0, "EnsureService",
		"gitlab", expectedParams, 1, config.ConfigAttributes{"juju-external-hostname": "exthost"})
}
   553  
// TestInvalidDeploymentChange verifies that adding a new filesystem to
// an already-deployed application is rejected: the operator status is
// set to an error and the broker is never invoked.
func (s *WorkerSuite) TestInvalidDeploymentChange(c *gc.C) {
	defer s.setupMocks(c).Finish()

	s.statusSetter.EXPECT().SetOperatorStatus(
		"gitlab", status.Error, "k8s does not support updating storage", map[string]interface{}(nil))

	w := s.setupNewUnitScenario(c)
	defer workertest.CleanKill(c, w)

	s.serviceBroker.ResetCalls()

	// Same spec, nothing happens.
	s.sendContainerSpecChange(c)
	s.podSpecGetter.assertSpecRetrieved(c)
	select {
	case <-s.serviceEnsured:
		c.Fatal("service/unit ensured unexpectedly")
	case <-time.After(coretesting.ShortWait):
	}

	var (
		anotherSpec = `
containers:
  - name: gitlab
    image: gitlab/latest
`[1:]
	)
	anotherParsedSpec := &specs.PodSpec{}
	anotherParsedSpec.Version = specs.CurrentVersion
	anotherParsedSpec.Containers = []specs.ContainerSpec{{
		Name:  "gitlab",
		Image: "gitlab/latest",
	}}

	// The new provisioning info adds a second filesystem ("logs"),
	// which is the invalid storage change under test.
	s.podSpecGetter.setProvisioningInfo(apicaasunitprovisioner.ProvisioningInfo{
		PodSpec:     anotherSpec,
		Tags:        map[string]string{"foo": "bar"},
		Constraints: constraints.MustParse("mem=4G"),
		DeploymentInfo: apicaasunitprovisioner.DeploymentInfo{
			DeploymentType: "stateful",
			ServiceType:    "loadbalancer",
		},
		Filesystems: []storage.KubernetesFilesystemParams{{
			StorageName: "database",
			Size:        100,
		}, {
			StorageName: "logs",
			Size:        100,
		}},
	})
	s.sendContainerSpecChange(c)
	s.podSpecGetter.assertSpecRetrieved(c)

	// The broker must not be asked to apply the invalid change.
	c.Assert(s.serviceBroker.Calls(), gc.HasLen, 0)
}
   609  
// TestScaleZero verifies that scaling an application down to zero calls
// EnsureService with empty service params, zero units and nil config.
func (s *WorkerSuite) TestScaleZero(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w := s.setupNewUnitScenario(c)
	defer workertest.CleanKill(c, w)

	s.serviceBroker.ResetCalls()
	// Add another unit.
	s.applicationGetter.scale = 2
	select {
	case s.applicationScaleChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending scale change")
	}

	select {
	case <-s.serviceEnsured:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be ensured")
	}
	s.serviceBroker.ResetCalls()

	// Now the scale down to 0.
	s.applicationGetter.scale = 0
	select {
	case s.applicationScaleChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending scale change")
	}

	select {
	case <-s.serviceEnsured:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be ensured")
	}
	// At scale zero no pod spec or config is passed to the broker.
	s.serviceBroker.CheckCallNames(c, "EnsureService")
	s.serviceBroker.CheckCall(c, 0, "EnsureService",
		"gitlab", &caas.ServiceParams{}, 0, config.ConfigAttributes(nil))
}
   649  
// TestApplicationDeadRemovesService verifies that when an application's
// life can no longer be read (not found), the worker unexposes and then
// deletes the application's service.
func (s *WorkerSuite) TestApplicationDeadRemovesService(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w := s.setupNewUnitScenario(c)
	defer workertest.CleanKill(c, w)

	s.serviceBroker.ResetCalls()
	s.containerBroker.ResetCalls()

	// Simulate the application having been removed.
	s.lifeGetter.SetErrors(errors.NotFoundf("application"))
	s.sendApplicationChanges(c, "gitlab")

	select {
	case <-s.serviceDeleted:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be deleted")
	}

	s.serviceBroker.CheckCallNames(c, "UnexposeService", "DeleteService")
	s.serviceBroker.CheckCall(c, 0, "UnexposeService", "gitlab")
	s.serviceBroker.CheckCall(c, 1, "DeleteService", "gitlab")
}
   672  
// TestWatchApplicationDead verifies that an application already Dead at
// first sight has its service deleted and never gets a scale watcher.
func (s *WorkerSuite) TestWatchApplicationDead(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w, err := caasunitprovisioner.NewWorker(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)

	s.lifeGetter.setLife(life.Dead)
	s.sendApplicationChanges(c, "gitlab")

	select {
	case <-s.serviceDeleted:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be deleted")
	}

	// Nothing should be receiving on the scale channel, since a Dead
	// application must not be watched for scale changes.
	select {
	case s.applicationScaleChanges <- struct{}{}:
		c.Fatal("unexpected watch for application scale")
	case <-time.After(coretesting.ShortWait):
	}

	workertest.CleanKill(c, w)
	// There should just be the initial watch call, no subsequent calls to watch/get scale etc.
	s.applicationGetter.CheckCallNames(c, "WatchApplications", "DeploymentMode")
}
   699  
// TestRemoveApplicationStopsWatchingApplicationScale verifies that when
// an application is removed, its per-application worker stops (killing
// its scale watcher) while workers for other applications keep running.
func (s *WorkerSuite) TestRemoveApplicationStopsWatchingApplicationScale(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w, err := caasunitprovisioner.NewWorker(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)

	s.sendApplicationChanges(c, "gitlab")

	// Check that the gitlab worker is running or not;
	// given it time to startup.
	startingRetryCallArgs := retry.CallArgs{
		Clock:       clock.WallClock,
		MaxDuration: coretesting.LongWait,
		Delay:       10 * time.Millisecond,
		Func: func() error {
			_, running := caasunitprovisioner.AppWorker(w, "gitlab")
			if running {
				return nil
			}
			return errors.NotYetAvailablef("not running")
		},
	}
	err = retry.Call(startingRetryCallArgs)
	c.Assert(err, jc.ErrorIsNil)

	// Add an additional app worker so we can check that the correct one is accessed.
	caasunitprovisioner.NewAppWorker(w, "mysql")

	// Simulate removal of the gitlab application.
	s.lifeGetter.SetErrors(errors.NotFoundf("application"))
	s.sendApplicationChanges(c, "gitlab")

	select {
	case <-s.serviceDeleted:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be deleted")
	}

	// The mysql worker should still be running.
	_, ok := caasunitprovisioner.AppWorker(w, "mysql")
	c.Assert(ok, jc.IsTrue)

	// Check that the gitlab worker is running or not;
	// given it time to shutdown.
	stoppingRetryCallArgs := retry.CallArgs{
		Clock:       clock.WallClock,
		MaxDuration: coretesting.LongWait,
		Delay:       10 * time.Millisecond,
		Func: func() error {
			_, running := caasunitprovisioner.AppWorker(w, "gitlab")
			if !running {
				return nil
			}
			return errors.NotYetAvailablef("still running")
		},
	}
	err = retry.Call(stoppingRetryCallArgs)
	c.Assert(err, jc.ErrorIsNil)
	// The gitlab app worker's scale watcher must have been killed.
	workertest.CheckKilled(c, s.applicationGetter.scaleWatcher)
}
   760  
// TestRemoveWorkloadApplicationWaitsForResources verifies that removing
// a workload-mode application deletes the service and stops its app
// worker, but only clears application resources once both units and the
// operator are gone from the cluster.
func (s *WorkerSuite) TestRemoveWorkloadApplicationWaitsForResources(c *gc.C) {
	defer s.setupMocks(c).Finish()

	w, err := caasunitprovisioner.NewWorker(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)

	s.sendApplicationChanges(c, "gitlab")

	// Check that the gitlab worker is running or not;
	// given it time to startup.
	startingRetryCallArgs := retry.CallArgs{
		Clock:       clock.WallClock,
		MaxDuration: coretesting.LongWait,
		Delay:       10 * time.Millisecond,
		Func: func() error {
			_, running := caasunitprovisioner.AppWorker(w, "gitlab")
			if running {
				return nil
			}
			return errors.NotYetAvailablef("not running")
		},
	}
	err = retry.Call(startingRetryCallArgs)
	c.Assert(err, jc.ErrorIsNil)

	// Simulate removal of the application.
	s.lifeGetter.SetErrors(errors.NotFoundf("application"))
	s.sendApplicationChanges(c, "gitlab")

	select {
	case <-s.serviceDeleted:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be deleted")
	}

	// Check that the gitlab worker is running or not;
	// given it time to shutdown.
	stoppingRetryCallArgs := retry.CallArgs{
		Clock:       clock.WallClock,
		MaxDuration: coretesting.LongWait,
		Delay:       10 * time.Millisecond,
		Func: func() error {
			_, running := caasunitprovisioner.AppWorker(w, "gitlab")
			if !running {
				return nil
			}
			return errors.NotYetAvailablef("still running")
		},
	}
	err = retry.Call(stoppingRetryCallArgs)
	c.Assert(err, jc.ErrorIsNil)

	// Check the undertaker worker clears application resources.
	// The seeded "operator not found" error marks the operator as gone.
	s.containerBroker.SetErrors(nil, errors.NotFoundf("operator"))
	s.containerBroker.units = nil

	select {
	case s.caasUnitsChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending units change")
	}

	select {
	case s.caasOperatorChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending operator change")
	}

	select {
	case <-s.resourcesCleared:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for resources to be cleared")
	}
}
   835  
// TestRemoveOperatorApplicationWaitsForResources is the operator-mode
// counterpart of the workload test above: resources are cleared once the
// units are gone, with no separate operator check required.
func (s *WorkerSuite) TestRemoveOperatorApplicationWaitsForResources(c *gc.C) {
	defer s.setupMocks(c).Finish()
	s.applicationGetter.deploymentMode = caas.ModeOperator

	w, err := caasunitprovisioner.NewWorker(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)

	s.sendApplicationChanges(c, "gitlab")

	// Check that the gitlab worker is running or not;
	// given it time to startup.
	startingRetryCallArgs := retry.CallArgs{
		Clock:       clock.WallClock,
		MaxDuration: coretesting.LongWait,
		Delay:       10 * time.Millisecond,
		Func: func() error {
			_, running := caasunitprovisioner.AppWorker(w, "gitlab")
			if running {
				return nil
			}
			return errors.NotYetAvailablef("not running")
		},
	}
	err = retry.Call(startingRetryCallArgs)
	c.Assert(err, jc.ErrorIsNil)

	// Simulate removal of the application.
	s.lifeGetter.SetErrors(errors.NotFoundf("application"))
	s.sendApplicationChanges(c, "gitlab")

	select {
	case <-s.serviceDeleted:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for service to be deleted")
	}

	// Check that the gitlab worker is running or not;
	// given it time to shutdown.
	stoppingRetryCallArgs := retry.CallArgs{
		Clock:       clock.WallClock,
		MaxDuration: coretesting.LongWait,
		Delay:       10 * time.Millisecond,
		Func: func() error {
			_, running := caasunitprovisioner.AppWorker(w, "gitlab")
			if !running {
				return nil
			}
			return errors.NotYetAvailablef("still running")
		},
	}
	err = retry.Call(stoppingRetryCallArgs)
	c.Assert(err, jc.ErrorIsNil)

	// Check the undertaker worker clears application resources.
	s.containerBroker.units = nil
	select {
	case s.caasUnitsChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending units change")
	}

	select {
	case <-s.resourcesCleared:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for resources to be cleared")
	}
}
   903  
   904  func (s *WorkerSuite) TestWatcherErrorStopsWorker(c *gc.C) {
   905  	defer s.setupMocks(c).Finish()
   906  
   907  	w, err := caasunitprovisioner.NewWorker(s.config)
   908  	c.Assert(err, jc.ErrorIsNil)
   909  	defer workertest.DirtyKill(c, w)
   910  
   911  	s.applicationGetter.scale = 1
   912  	s.sendApplicationChanges(c, "gitlab")
   913  
   914  	select {
   915  	case s.applicationScaleChanges <- struct{}{}:
   916  	case <-time.After(coretesting.LongWait):
   917  		c.Fatal("timed out sending scale change")
   918  	}
   919  
   920  	s.podSpecGetter.watcher.KillErr(errors.New("splat"))
   921  	workertest.CheckKilled(c, s.podSpecGetter.watcher)
   922  	workertest.CheckKilled(c, s.applicationGetter.watcher)
   923  	err = workertest.CheckKilled(c, w)
   924  	c.Assert(err, gc.ErrorMatches, "splat")
   925  }
   926  
func (s *WorkerSuite) TestUnitsChange(c *gc.C) {
	// NOTE(review): unlike the sibling tests, this one does not call
	// s.setupMocks before NewWorker; the mocks are instead set up
	// inside assertUnitChange below — confirm s.config is fully
	// initialised at this point.
	w, err := caasunitprovisioner.NewWorker(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, w)

	s.sendApplicationChanges(c, "gitlab")
	// CleanKill is deferred after DirtyKill, so on exit it runs first;
	// the earlier DirtyKill then acts as a safety net on the already
	// stopped worker.
	defer workertest.CleanKill(c, w)

	// Wait until the application worker has made both of its initial
	// broker calls before asserting on their names.
	retryCallArgs := coretesting.LongRetryStrategy
	retryCallArgs.Func = func() error {
		if len(s.containerBroker.Calls()) >= 2 {
			return nil
		}
		return errors.Errorf("Not enough calls yet")
	}
	err = retry.Call(retryCallArgs)
	c.Assert(err, jc.ErrorIsNil)

	s.containerBroker.CheckCallNames(c, "WatchUnits", "WatchOperator")

	// Drive two unit-change events: the broker reports Allocating both
	// times, but the second event is expected to surface as Unknown in
	// the update sent to the controller (see assertUnitChange).
	s.assertUnitChange(c, status.Allocating, status.Allocating)
	s.assertUnitChange(c, status.Allocating, status.Unknown)
}
   950  
   951  func (s *WorkerSuite) TestOperatorChange(c *gc.C) {
   952  	defer s.setupMocks(c).Finish()
   953  
   954  	s.statusSetter.EXPECT().SetOperatorStatus(
   955  		"gitlab", status.Active, "testing 1. 2. 3.", map[string]interface{}{"zip": "zap"})
   956  
   957  	w, err := caasunitprovisioner.NewWorker(s.config)
   958  	c.Assert(err, jc.ErrorIsNil)
   959  	defer workertest.DirtyKill(c, w)
   960  
   961  	s.sendApplicationChanges(c, "gitlab")
   962  
   963  	retryCallArgs := coretesting.LongRetryStrategy
   964  	retryCallArgs.Func = func() error {
   965  		if len(s.containerBroker.Calls()) >= 2 {
   966  			return nil
   967  		}
   968  		return errors.Errorf("Not enough calls yet")
   969  	}
   970  	err = retry.Call(retryCallArgs)
   971  	c.Assert(err, jc.ErrorIsNil)
   972  
   973  	s.containerBroker.CheckCallNames(c, "WatchUnits", "WatchOperator")
   974  	s.containerBroker.ResetCalls()
   975  
   976  	// Initial event
   977  	s.containerBroker.SetErrors(errors.NotFoundf("gitlab"))
   978  	select {
   979  	case s.caasOperatorChanges <- struct{}{}:
   980  	case <-time.After(coretesting.LongWait):
   981  		c.Fatal("timed out sending applications change")
   982  	}
   983  
   984  	retryCallArgs.Func = func() error {
   985  		if len(s.containerBroker.Calls()) > 0 {
   986  			return nil
   987  		}
   988  		return errors.Errorf("Not enough calls yet")
   989  	}
   990  	err = retry.Call(retryCallArgs)
   991  	c.Assert(err, jc.ErrorIsNil)
   992  
   993  	s.containerBroker.CheckCallNames(c, "Operator")
   994  	c.Assert(s.containerBroker.Calls()[0].Args, jc.DeepEquals, []interface{}{"gitlab"})
   995  	s.containerBroker.ResetCalls()
   996  
   997  	select {
   998  	case s.caasOperatorChanges <- struct{}{}:
   999  	case <-time.After(coretesting.LongWait):
  1000  		c.Fatal("timed out sending applications change")
  1001  	}
  1002  	s.containerBroker.reportedOperatorStatus = status.Active
  1003  	retryCallArgs.Func = func() error {
  1004  		if len(s.containerBroker.Calls()) > 0 {
  1005  			return nil
  1006  		}
  1007  		return errors.Errorf("Not enough calls yet")
  1008  	}
  1009  	err = retry.Call(retryCallArgs)
  1010  	c.Assert(err, jc.ErrorIsNil)
  1011  
  1012  	s.containerBroker.CheckCallNames(c, "Operator")
  1013  	c.Assert(s.containerBroker.Calls()[0].Args, jc.DeepEquals, []interface{}{"gitlab"})
  1014  
  1015  }
  1016  
  1017  func (s *WorkerSuite) TestV2CharmSkipsProcessing(c *gc.C) {
  1018  	ctrl := s.setupMocks(c)
  1019  
  1020  	// Make it a v2 charm
  1021  	s.charmGetter.charmInfo.Manifest = &charm.Manifest{Bases: []charm.Base{{}}}
  1022  
  1023  	w, err := caasunitprovisioner.NewWorker(s.config)
  1024  	c.Assert(err, jc.ErrorIsNil)
  1025  
  1026  	s.sendApplicationChanges(c, "gitlab")
  1027  
  1028  	s.lifeGetter.CheckNoCalls(c)
  1029  
  1030  	workertest.CheckAlive(c, w)
  1031  	workertest.CleanKill(c, w)
  1032  
  1033  	ctrl.Finish()
  1034  }
  1035  
  1036  func (s *WorkerSuite) TestNotFoundCharmSkipsProcessing(c *gc.C) {
  1037  	ctrl := s.setupMocks(c)
  1038  
  1039  	s.charmGetter.charmInfo = nil // ApplicationCharmInfo will return NotFound error
  1040  
  1041  	w, err := caasunitprovisioner.NewWorker(s.config)
  1042  	c.Assert(err, jc.ErrorIsNil)
  1043  
  1044  	s.sendApplicationChanges(c, "gitlab")
  1045  
  1046  	s.lifeGetter.CheckNoCalls(c)
  1047  
  1048  	workertest.CheckAlive(c, w)
  1049  	workertest.CleanKill(c, w)
  1050  
  1051  	ctrl.Finish()
  1052  }
  1053  
  1054  func (s *WorkerSuite) TestV2CharmExitsApplicationWorker(c *gc.C) {
  1055  	ctrl := s.setupMocks(c)
  1056  	defer ctrl.Finish()
  1057  
  1058  	w, err := caasunitprovisioner.NewWorker(s.config)
  1059  	c.Assert(err, jc.ErrorIsNil)
  1060  	defer workertest.DirtyKill(c, w)
  1061  
  1062  	waitCharmGetterCalls := func(names ...string) {
  1063  		retryCallArgs := coretesting.LongRetryStrategy
  1064  		retryCallArgs.Func = func() error {
  1065  			if len(s.charmGetter.Calls()) >= len(names) {
  1066  				return nil
  1067  			}
  1068  			return errors.Errorf("Not enough calls yet")
  1069  		}
  1070  		err = retry.Call(retryCallArgs)
  1071  		c.Assert(err, jc.ErrorIsNil)
  1072  		s.charmGetter.CheckCallNames(c, names...)
  1073  		s.charmGetter.ResetCalls()
  1074  	}
  1075  
  1076  	// Will trigger ApplicationCharmInfo call in main worker
  1077  	s.sendApplicationChanges(c, "gitlab")
  1078  	waitCharmGetterCalls("ApplicationCharmInfo")
  1079  
  1080  	// Wait till application worker has started up
  1081  	var appWorker worker.Worker
  1082  	retryCallArgs := coretesting.LongRetryStrategy
  1083  	retryCallArgs.Func = func() error {
  1084  		aw, ok := caasunitprovisioner.AppWorker(w, "gitlab")
  1085  		if ok {
  1086  			appWorker = aw
  1087  			return nil
  1088  		}
  1089  		return errors.NotYetAvailablef("worker not up yet")
  1090  	}
  1091  	err = retry.Call(retryCallArgs)
  1092  	c.Assert(err, jc.ErrorIsNil)
  1093  	c.Assert(appWorker, gc.NotNil)
  1094  
  1095  	// Make it a v2 charm (will make the application worker exit)
  1096  	s.charmGetter.charmInfo.Manifest = &charm.Manifest{Bases: []charm.Base{{}}}
  1097  
  1098  	// Trigger ApplicationCharmInfo call in application worker
  1099  	select {
  1100  	case s.appChanges <- struct{}{}:
  1101  	case <-time.After(coretesting.ShortWait):
  1102  		c.Fatal("timed out sending app change")
  1103  	}
  1104  	waitCharmGetterCalls("ApplicationCharmInfo")
  1105  
  1106  	// Ensure application worker exits due to charm becoming v2
  1107  	err = workertest.CheckKilled(c, appWorker)
  1108  	c.Assert(err, jc.ErrorIsNil)
  1109  }
  1110  
  1111  func (s *WorkerSuite) sendApplicationChanges(c *gc.C, appNames ...string) {
  1112  	select {
  1113  	case s.applicationChanges <- appNames:
  1114  	case <-time.After(coretesting.LongWait):
  1115  		c.Fatal("timed out sending applications change")
  1116  	}
  1117  }
  1118  
// assertUnitChange drives a units-changed event through the worker and
// asserts that the container broker is queried for units and that the
// unit updater receives the expected translated unit parameters, with
// the unit status surfaced as expectedUnitStatus.
// NOTE(review): setupMocks is invoked here even though callers have
// typically already set the suite up; confirm that re-creating the
// gomock controller and reassigning s.config mid-test is intentional.
func (s *WorkerSuite) assertUnitChange(c *gc.C, reported, expectedUnitStatus status.Status) {
	defer s.setupMocks(c).Finish()

	// Start from a clean slate so the call assertions below only see
	// activity caused by this event.
	s.containerBroker.ResetCalls()
	s.unitUpdater.ResetCalls()
	s.containerBroker.reportedUnitStatus = reported

	select {
	case s.caasUnitsChanges <- struct{}{}:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out sending units change")
	}

	// Wait for the worker to ask the broker for the current units.
	retryCallArgs := coretesting.LongRetryStrategy
	retryCallArgs.Func = func() error {
		if len(s.containerBroker.Calls()) > 0 {
			return nil
		}
		return errors.Errorf("Not enough calls yet")
	}
	err := retry.Call(retryCallArgs)
	c.Assert(err, jc.ErrorIsNil)
	s.containerBroker.CheckCallNames(c, "Units")
	c.Assert(s.containerBroker.Calls()[0].Args, jc.DeepEquals, []interface{}{"gitlab", caas.ModeWorkload})

	// Wait for the worker to push the resulting unit update back.
	retryCallArgs.Func = func() error {
		if len(s.unitUpdater.Calls()) > 0 {
			return nil
		}
		return errors.Errorf("Not enough calls yet")
	}
	err = retry.Call(retryCallArgs)
	c.Assert(err, jc.ErrorIsNil)
	s.unitUpdater.CheckCallNames(c, "UpdateUnits")
	scale := 4
	// The expected payload mirrors the fake broker's single unit "u1"
	// and its filesystem/volume fixtures, with the status translated
	// to expectedUnitStatus.
	c.Assert(s.unitUpdater.Calls()[0].Args, jc.DeepEquals, []interface{}{
		params.UpdateApplicationUnits{
			ApplicationTag: names.NewApplicationTag("gitlab").String(),
			Scale:          &scale,
			Units: []params.ApplicationUnitParams{
				{ProviderId: "u1", Address: "10.0.0.1", Ports: []string(nil), Status: expectedUnitStatus.String(),
					Stateful: true,
					FilesystemInfo: []params.KubernetesFilesystemInfo{
						{StorageName: "database", MountPoint: "/path-to-here", ReadOnly: true,
							FilesystemId: "fs-id", Size: 100, Pool: "",
							Volume: params.KubernetesVolumeInfo{
								VolumeId: "vol-id", Size: 200,
								Persistent: true, Status: "error", Info: "vol not ready"},
							Status: "attaching", Info: "not ready"},
					}},
			},
		},
	})
}
  1173  
  1174  func (s *WorkerSuite) setupMocks(c *gc.C) *gomock.Controller {
  1175  	ctrl := gomock.NewController(c)
  1176  
  1177  	s.statusSetter = caasunitprovisioner.NewMockProvisioningStatusSetter(ctrl)
  1178  
  1179  	s.config = caasunitprovisioner.Config{
  1180  		ApplicationGetter:        &s.applicationGetter,
  1181  		ApplicationUpdater:       &s.applicationUpdater,
  1182  		ServiceBroker:            &s.serviceBroker,
  1183  		ContainerBroker:          &s.containerBroker,
  1184  		ProvisioningInfoGetter:   &s.podSpecGetter,
  1185  		LifeGetter:               &s.lifeGetter,
  1186  		CharmGetter:              &s.charmGetter,
  1187  		UnitUpdater:              &s.unitUpdater,
  1188  		ProvisioningStatusSetter: s.statusSetter,
  1189  		Logger:                   loggo.GetLogger("test"),
  1190  	}
  1191  
  1192  	return ctrl
  1193  }