github.com/axw/juju@v0.0.0-20161005053422-4bd6544d08d4/worker/storageprovisioner/storageprovisioner_test.go

     1  // Copyright 2015 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package storageprovisioner_test
     5  
     6  import (
     7  	"time"
     8  
     9  	"github.com/juju/errors"
    10  	jc "github.com/juju/testing/checkers"
    11  	"github.com/juju/utils/clock"
    12  	gc "gopkg.in/check.v1"
    13  	"gopkg.in/juju/names.v2"
    14  
    15  	"github.com/juju/juju/apiserver/params"
    16  	"github.com/juju/juju/instance"
    17  	"github.com/juju/juju/storage"
    18  	coretesting "github.com/juju/juju/testing"
    19  	"github.com/juju/juju/watcher"
    20  	"github.com/juju/juju/worker"
    21  	"github.com/juju/juju/worker/storageprovisioner"
    22  )
    23  
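        // storageProvisionerSuite exercises the storage provisioner worker against
        // a dummy storage provider and mock API-facing accessors. Helpers such as
        // newMockVolumeAccessor, waitChannel and assertNoEvent are assumed to be
        // defined in this package's other test files (not shown here): waitChannel
        // returns the next value sent on a channel, and assertNoEvent asserts that
        // nothing is sent within a short timeout.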
    24  type storageProvisionerSuite struct {
    25  	coretesting.BaseSuite
    26  	provider                *dummyProvider
    27  	registry                storage.ProviderRegistry
    28  	managedFilesystemSource *mockManagedFilesystemSource
    29  }
    30  
    31  var _ = gc.Suite(&storageProvisionerSuite{})
    32  
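        // SetUpTest registers a single dynamic "dummy" provider and patches the
        // managed filesystem source constructor so that each test can capture the
        // block devices and filesystems handed to it via s.managedFilesystemSource.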
    33  func (s *storageProvisionerSuite) SetUpTest(c *gc.C) {
    34  	s.BaseSuite.SetUpTest(c)
    35  	s.provider = &dummyProvider{dynamic: true}
    36  	s.registry = storage.StaticProviderRegistry{
    37  		map[storage.ProviderType]storage.Provider{
    38  			"dummy": s.provider,
    39  		},
    40  	}
    41  
    42  	s.managedFilesystemSource = nil
    43  	s.PatchValue(
    44  		storageprovisioner.NewManagedFilesystemSource,
    45  		func(
    46  			blockDevices map[names.VolumeTag]storage.BlockDevice,
    47  			filesystems map[names.FilesystemTag]storage.Filesystem,
    48  		) storage.FilesystemSource {
    49  			s.managedFilesystemSource = &mockManagedFilesystemSource{
    50  				blockDevices: blockDevices,
    51  				filesystems:  filesystems,
    52  			}
    53  			return s.managedFilesystemSource
    54  		},
    55  	)
    56  }
    57  
    58  func (s *storageProvisionerSuite) TestStartStop(c *gc.C) {
    59  	worker, err := storageprovisioner.NewStorageProvisioner(storageprovisioner.Config{
    60  		Scope:       coretesting.ModelTag,
    61  		Volumes:     newMockVolumeAccessor(),
    62  		Filesystems: newMockFilesystemAccessor(),
    63  		Life:        &mockLifecycleManager{},
    64  		Registry:    s.registry,
    65  		Machines:    newMockMachineAccessor(c),
    66  		Status:      &mockStatusSetter{},
    67  		Clock:       &mockClock{},
    68  	})
    69  	c.Assert(err, jc.ErrorIsNil)
    70  
    71  	worker.Kill()
    72  	c.Assert(worker.Wait(), gc.IsNil)
    73  }
    74  
    75  func (s *storageProvisionerSuite) TestInvalidConfig(c *gc.C) {
    76  	_, err := storageprovisioner.NewStorageProvisioner(almostValidConfig())
    77  	c.Check(err, jc.Satisfies, errors.IsNotValid)
    78  }
    79  
    80  func (s *storageProvisionerSuite) TestVolumeAdded(c *gc.C) {
    81  	expectedVolumes := []params.Volume{{
    82  		VolumeTag: "volume-1",
    83  		Info: params.VolumeInfo{
    84  			VolumeId:   "id-1",
    85  			HardwareId: "serial-1",
    86  			Size:       1024,
    87  			Persistent: true,
    88  		},
    89  	}, {
    90  		VolumeTag: "volume-2",
    91  		Info: params.VolumeInfo{
    92  			VolumeId:   "id-2",
    93  			HardwareId: "serial-2",
    94  			Size:       1024,
    95  		},
    96  	}}
    97  	expectedVolumeAttachments := []params.VolumeAttachment{{
    98  		VolumeTag:  "volume-1",
    99  		MachineTag: "machine-1",
   100  		Info: params.VolumeAttachmentInfo{
   101  			DeviceName: "/dev/sda1",
   102  			ReadOnly:   true,
   103  		},
   104  	}, {
   105  		VolumeTag:  "volume-2",
   106  		MachineTag: "machine-1",
   107  		Info: params.VolumeAttachmentInfo{
   108  			DeviceName: "/dev/sda2",
   109  		},
   110  	}}
   111  
   112  	volumeInfoSet := make(chan interface{})
   113  	volumeAccessor := newMockVolumeAccessor()
   114  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   115  	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
   116  		defer close(volumeInfoSet)
   117  		c.Assert(volumes, jc.SameContents, expectedVolumes)
   118  		return nil, nil
   119  	}
   120  
   121  	volumeAttachmentInfoSet := make(chan interface{})
   122  	volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) {
   123  		defer close(volumeAttachmentInfoSet)
   124  		c.Assert(volumeAttachments, jc.SameContents, expectedVolumeAttachments)
   125  		return nil, nil
   126  	}
   127  
   128  	args := &workerArgs{volumes: volumeAccessor, registry: s.registry}
   129  	worker := newStorageProvisioner(c, args)
   130  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   131  	defer worker.Kill()
   132  
   133  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   134  		MachineTag: "machine-1", AttachmentTag: "volume-1",
   135  	}, {
   136  		MachineTag: "machine-1", AttachmentTag: "volume-2",
   137  	}}
   138  	assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment set")
   139  
   140  	// The worker should create volumes according to ids "1" and "2".
   141  	volumeAccessor.volumesWatcher.changes <- []string{"1", "2"}
   142  	waitChannel(c, volumeInfoSet, "waiting for volume info to be set")
   143  	waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set")
   144  }
   145  
   146  func (s *storageProvisionerSuite) TestCreateVolumeCreatesAttachment(c *gc.C) {
   147  	volumeAccessor := newMockVolumeAccessor()
   148  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   149  
   150  	volumeAttachmentInfoSet := make(chan interface{})
   151  	volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) {
   152  		defer close(volumeAttachmentInfoSet)
   153  		return make([]params.ErrorResult, len(volumeAttachments)), nil
   154  	}
   155  
   156  	s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) {
   157  		volumeAccessor.provisionedAttachments[params.MachineStorageId{
   158  			MachineTag:    args[0].Attachment.Machine.String(),
   159  			AttachmentTag: args[0].Attachment.Volume.String(),
   160  		}] = params.VolumeAttachment{
   161  			VolumeTag:  args[0].Attachment.Volume.String(),
   162  			MachineTag: args[0].Attachment.Machine.String(),
   163  		}
   164  		return []storage.CreateVolumesResult{{
   165  			Volume: &storage.Volume{
   166  				Tag: args[0].Tag,
   167  				VolumeInfo: storage.VolumeInfo{
   168  					VolumeId: "vol-ume",
   169  				},
   170  			},
   171  			VolumeAttachment: &storage.VolumeAttachment{
   172  				Volume:  args[0].Attachment.Volume,
   173  				Machine: args[0].Attachment.Machine,
   174  			},
   175  		}}, nil
   176  	}
   177  
   178  	attachVolumesCalled := make(chan interface{})
   179  	s.provider.attachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) {
   180  		defer close(attachVolumesCalled)
   181  		return nil, errors.New("should not be called")
   182  	}
   183  
   184  	args := &workerArgs{volumes: volumeAccessor, registry: s.registry}
   185  	worker := newStorageProvisioner(c, args)
   186  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   187  	defer worker.Kill()
   188  
   189  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   190  		MachineTag: "machine-1", AttachmentTag: "volume-1",
   191  	}}
   192  	assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment set")
   193  
   194  	// The worker should create a volume according to id "1".
   195  	volumeAccessor.volumesWatcher.changes <- []string{"1"}
   196  	waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set")
   197  	assertNoEvent(c, attachVolumesCalled, "AttachVolumes called")
   198  }
   199  
   200  func (s *storageProvisionerSuite) TestCreateVolumeRetry(c *gc.C) {
   201  	volumeInfoSet := make(chan interface{})
   202  	volumeAccessor := newMockVolumeAccessor()
   203  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   204  	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
   205  		defer close(volumeInfoSet)
   206  		return make([]params.ErrorResult, len(volumes)), nil
   207  	}
   208  
   209  	// The mock clock's After will advance the current time by the specified
   210  	// duration and signal the channel immediately.
   211  	clock := &mockClock{}
   212  	var createVolumeTimes []time.Time
   213  
   214  	s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) {
   215  		createVolumeTimes = append(createVolumeTimes, clock.Now())
   216  		if len(createVolumeTimes) < 10 {
   217  			return []storage.CreateVolumesResult{{Error: errors.New("badness")}}, nil
   218  		}
   219  		return []storage.CreateVolumesResult{{
   220  			Volume: &storage.Volume{Tag: args[0].Tag},
   221  		}}, nil
   222  	}
   223  
   224  	args := &workerArgs{volumes: volumeAccessor, clock: clock, registry: s.registry}
   225  	worker := newStorageProvisioner(c, args)
   226  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   227  	defer worker.Kill()
   228  
   229  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   230  		MachineTag: "machine-1", AttachmentTag: "volume-1",
   231  	}}
   232  	volumeAccessor.volumesWatcher.changes <- []string{"1"}
   233  	waitChannel(c, volumeInfoSet, "waiting for volume info to be set")
   234  	c.Assert(createVolumeTimes, gc.HasLen, 10)
   235  
   236  	// The first attempt should have been immediate: T0.
   237  	c.Assert(createVolumeTimes[0], gc.Equals, time.Time{})
   238  
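        	// Subsequent attempts are expected to back off exponentially from
        	// 30 seconds, doubling on each failure up to a 30-minute ceiling,
        	// as asserted on the inter-attempt delays below.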
   239  	delays := make([]time.Duration, len(createVolumeTimes)-1)
   240  	for i := range createVolumeTimes[1:] {
   241  		delays[i] = createVolumeTimes[i+1].Sub(createVolumeTimes[i])
   242  	}
   243  	c.Assert(delays, jc.DeepEquals, []time.Duration{
   244  		30 * time.Second,
   245  		1 * time.Minute,
   246  		2 * time.Minute,
   247  		4 * time.Minute,
   248  		8 * time.Minute,
   249  		16 * time.Minute,
   250  		30 * time.Minute, // ceiling reached
   251  		30 * time.Minute,
   252  		30 * time.Minute,
   253  	})
   254  
   255  	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
   256  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   257  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   258  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   259  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   260  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   261  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   262  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   263  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   264  		{Tag: "volume-1", Status: "pending", Info: "badness"},
   265  		{Tag: "volume-1", Status: "attaching", Info: ""},
   266  	})
   267  }
   268  
   269  func (s *storageProvisionerSuite) TestCreateFilesystemRetry(c *gc.C) {
   270  	filesystemInfoSet := make(chan interface{})
   271  	filesystemAccessor := newMockFilesystemAccessor()
   272  	filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   273  	filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
   274  		defer close(filesystemInfoSet)
   275  		return make([]params.ErrorResult, len(filesystems)), nil
   276  	}
   277  
   278  	// The mock clock's After will advance the current time by the specified
   279  	// duration and signal the channel immediately.
   280  	clock := &mockClock{}
   281  	var createFilesystemTimes []time.Time
   282  
   283  	s.provider.createFilesystemsFunc = func(args []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) {
   284  		createFilesystemTimes = append(createFilesystemTimes, clock.Now())
   285  		if len(createFilesystemTimes) < 10 {
   286  			return []storage.CreateFilesystemsResult{{Error: errors.New("badness")}}, nil
   287  		}
   288  		return []storage.CreateFilesystemsResult{{
   289  			Filesystem: &storage.Filesystem{Tag: args[0].Tag},
   290  		}}, nil
   291  	}
   292  
   293  	args := &workerArgs{filesystems: filesystemAccessor, clock: clock, registry: s.registry}
   294  	worker := newStorageProvisioner(c, args)
   295  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   296  	defer worker.Kill()
   297  
   298  	filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   299  		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
   300  	}}
   301  	filesystemAccessor.filesystemsWatcher.changes <- []string{"1"}
   302  	waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set")
   303  	c.Assert(createFilesystemTimes, gc.HasLen, 10)
   304  
   305  	// The first attempt should have been immediate: T0.
   306  	c.Assert(createFilesystemTimes[0], gc.Equals, time.Time{})
   307  
   308  	delays := make([]time.Duration, len(createFilesystemTimes)-1)
   309  	for i := range createFilesystemTimes[1:] {
   310  		delays[i] = createFilesystemTimes[i+1].Sub(createFilesystemTimes[i])
   311  	}
   312  	c.Assert(delays, jc.DeepEquals, []time.Duration{
   313  		30 * time.Second,
   314  		1 * time.Minute,
   315  		2 * time.Minute,
   316  		4 * time.Minute,
   317  		8 * time.Minute,
   318  		16 * time.Minute,
   319  		30 * time.Minute, // ceiling reached
   320  		30 * time.Minute,
   321  		30 * time.Minute,
   322  	})
   323  
   324  	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
   325  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   326  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   327  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   328  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   329  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   330  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   331  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   332  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   333  		{Tag: "filesystem-1", Status: "pending", Info: "badness"},
   334  		{Tag: "filesystem-1", Status: "attaching", Info: ""},
   335  	})
   336  }
   337  
   338  func (s *storageProvisionerSuite) TestAttachVolumeRetry(c *gc.C) {
   339  	volumeInfoSet := make(chan interface{})
   340  	volumeAccessor := newMockVolumeAccessor()
   341  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   342  	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
   343  		defer close(volumeInfoSet)
   344  		return make([]params.ErrorResult, len(volumes)), nil
   345  	}
   346  	volumeAttachmentInfoSet := make(chan interface{})
   347  	volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) {
   348  		defer close(volumeAttachmentInfoSet)
   349  		return make([]params.ErrorResult, len(volumeAttachments)), nil
   350  	}
   351  
   352  	// The mock clock's After will advance the current time by the specified
   353  	// duration and signal the channel immediately.
   354  	clock := &mockClock{}
   355  	var attachVolumeTimes []time.Time
   356  
   357  	s.provider.attachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) {
   358  		attachVolumeTimes = append(attachVolumeTimes, clock.Now())
   359  		if len(attachVolumeTimes) < 10 {
   360  			return []storage.AttachVolumesResult{{Error: errors.New("badness")}}, nil
   361  		}
   362  		return []storage.AttachVolumesResult{{
   363  			VolumeAttachment: &storage.VolumeAttachment{
   364  				args[0].Volume,
   365  				args[0].Machine,
   366  				storage.VolumeAttachmentInfo{
   367  					DeviceName: "/dev/sda1",
   368  				},
   369  			},
   370  		}}, nil
   371  	}
   372  
   373  	args := &workerArgs{volumes: volumeAccessor, clock: clock, registry: s.registry}
   374  	worker := newStorageProvisioner(c, args)
   375  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   376  	defer worker.Kill()
   377  
   378  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   379  		MachineTag: "machine-1", AttachmentTag: "volume-1",
   380  	}}
   381  	volumeAccessor.volumesWatcher.changes <- []string{"1"}
   382  	waitChannel(c, volumeInfoSet, "waiting for volume info to be set")
   383  	waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set")
   384  	c.Assert(attachVolumeTimes, gc.HasLen, 10)
   385  
   386  	// The first attempt should have been immediate: T0.
   387  	c.Assert(attachVolumeTimes[0], gc.Equals, time.Time{})
   388  
   389  	delays := make([]time.Duration, len(attachVolumeTimes)-1)
   390  	for i := range attachVolumeTimes[1:] {
   391  		delays[i] = attachVolumeTimes[i+1].Sub(attachVolumeTimes[i])
   392  	}
   393  	c.Assert(delays, jc.DeepEquals, []time.Duration{
   394  		30 * time.Second,
   395  		1 * time.Minute,
   396  		2 * time.Minute,
   397  		4 * time.Minute,
   398  		8 * time.Minute,
   399  		16 * time.Minute,
   400  		30 * time.Minute, // ceiling reached
   401  		30 * time.Minute,
   402  		30 * time.Minute,
   403  	})
   404  
   405  	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
   406  		{Tag: "volume-1", Status: "attaching", Info: ""},        // CreateVolumes
   407  		{Tag: "volume-1", Status: "attaching", Info: "badness"}, // AttachVolumes
   408  		{Tag: "volume-1", Status: "attaching", Info: "badness"},
   409  		{Tag: "volume-1", Status: "attaching", Info: "badness"},
   410  		{Tag: "volume-1", Status: "attaching", Info: "badness"},
   411  		{Tag: "volume-1", Status: "attaching", Info: "badness"},
   412  		{Tag: "volume-1", Status: "attaching", Info: "badness"},
   413  		{Tag: "volume-1", Status: "attaching", Info: "badness"},
   414  		{Tag: "volume-1", Status: "attaching", Info: "badness"},
   415  		{Tag: "volume-1", Status: "attaching", Info: "badness"},
   416  		{Tag: "volume-1", Status: "attached", Info: ""},
   417  	})
   418  }
   419  
   420  func (s *storageProvisionerSuite) TestAttachFilesystemRetry(c *gc.C) {
   421  	filesystemInfoSet := make(chan interface{})
   422  	filesystemAccessor := newMockFilesystemAccessor()
   423  	filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   424  	filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
   425  		defer close(filesystemInfoSet)
   426  		return make([]params.ErrorResult, len(filesystems)), nil
   427  	}
   428  	filesystemAttachmentInfoSet := make(chan interface{})
   429  	filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) {
   430  		defer close(filesystemAttachmentInfoSet)
   431  		return make([]params.ErrorResult, len(filesystemAttachments)), nil
   432  	}
   433  
   434  	// The mock clock's After will advance the current time by the specified
   435  	// duration and signal the channel immediately.
   436  	clock := &mockClock{}
   437  	var attachFilesystemTimes []time.Time
   438  
   439  	s.provider.attachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) ([]storage.AttachFilesystemsResult, error) {
   440  		attachFilesystemTimes = append(attachFilesystemTimes, clock.Now())
   441  		if len(attachFilesystemTimes) < 10 {
   442  			return []storage.AttachFilesystemsResult{{Error: errors.New("badness")}}, nil
   443  		}
   444  		return []storage.AttachFilesystemsResult{{
   445  			FilesystemAttachment: &storage.FilesystemAttachment{
   446  				args[0].Filesystem,
   447  				args[0].Machine,
   448  				storage.FilesystemAttachmentInfo{
   449  					Path: "/oh/over/there",
   450  				},
   451  			},
   452  		}}, nil
   453  	}
   454  
   455  	args := &workerArgs{filesystems: filesystemAccessor, clock: clock, registry: s.registry}
   456  	worker := newStorageProvisioner(c, args)
   457  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   458  	defer worker.Kill()
   459  
   460  	filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   461  		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
   462  	}}
   463  	filesystemAccessor.filesystemsWatcher.changes <- []string{"1"}
   464  	waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set")
   465  	waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set")
   466  	c.Assert(attachFilesystemTimes, gc.HasLen, 10)
   467  
   468  	// The first attempt should have been immediate: T0.
   469  	c.Assert(attachFilesystemTimes[0], gc.Equals, time.Time{})
   470  
   471  	delays := make([]time.Duration, len(attachFilesystemTimes)-1)
   472  	for i := range attachFilesystemTimes[1:] {
   473  		delays[i] = attachFilesystemTimes[i+1].Sub(attachFilesystemTimes[i])
   474  	}
   475  	c.Assert(delays, jc.DeepEquals, []time.Duration{
   476  		30 * time.Second,
   477  		1 * time.Minute,
   478  		2 * time.Minute,
   479  		4 * time.Minute,
   480  		8 * time.Minute,
   481  		16 * time.Minute,
   482  		30 * time.Minute, // ceiling reached
   483  		30 * time.Minute,
   484  		30 * time.Minute,
   485  	})
   486  
   487  	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
   488  		{Tag: "filesystem-1", Status: "attaching", Info: ""},        // CreateFilesystems
   489  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"}, // AttachFilesystems
   490  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"},
   491  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"},
   492  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"},
   493  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"},
   494  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"},
   495  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"},
   496  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"},
   497  		{Tag: "filesystem-1", Status: "attaching", Info: "badness"},
   498  		{Tag: "filesystem-1", Status: "attached", Info: ""},
   499  	})
   500  }
   501  
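        // TestValidateVolumeParams checks that a parameter validation failure for
        // one volume is reported as an error status without blocking the creation
        // of other alive volumes or the destruction of other dead, provisioned
        // volumes.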
   502  func (s *storageProvisionerSuite) TestValidateVolumeParams(c *gc.C) {
   503  	volumeAccessor := newMockVolumeAccessor()
   504  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   505  	volumeAccessor.provisionedVolumes["volume-3"] = params.Volume{VolumeTag: "volume-3"}
   506  	volumeAccessor.provisionedVolumes["volume-4"] = params.Volume{
   507  		VolumeTag: "volume-4",
   508  		Info:      params.VolumeInfo{VolumeId: "vol-ume"},
   509  	}
   510  
   511  	var validateCalls int
   512  	validated := make(chan interface{}, 1)
   513  	s.provider.validateVolumeParamsFunc = func(p storage.VolumeParams) error {
   514  		validateCalls++
   515  		validated <- p
   516  		switch p.Tag.String() {
   517  		case "volume-1", "volume-3":
   518  			return errors.New("something is wrong")
   519  		}
   520  		return nil
   521  	}
   522  
   523  	life := func(tags []names.Tag) ([]params.LifeResult, error) {
   524  		results := make([]params.LifeResult, len(tags))
   525  		for i := range results {
   526  			switch tags[i].String() {
   527  			case "volume-3", "volume-4":
   528  				results[i].Life = params.Dead
   529  			default:
   530  				results[i].Life = params.Alive
   531  			}
   532  		}
   533  		return results, nil
   534  	}
   535  
   536  	createdVolumes := make(chan interface{}, 1)
   537  	s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) {
   538  		createdVolumes <- args
   539  		if len(args) != 1 {
   540  			return nil, errors.New("expected one argument")
   541  		}
   542  		return []storage.CreateVolumesResult{{
   543  			Volume: &storage.Volume{Tag: args[0].Tag},
   544  		}}, nil
   545  	}
   546  
   547  	destroyedVolumes := make(chan interface{}, 1)
   548  	s.provider.destroyVolumesFunc = func(volumeIds []string) ([]error, error) {
   549  		destroyedVolumes <- volumeIds
   550  		return make([]error, len(volumeIds)), nil
   551  	}
   552  
   553  	args := &workerArgs{
   554  		volumes: volumeAccessor,
   555  		life: &mockLifecycleManager{
   556  			life: life,
   557  		},
   558  		registry: s.registry,
   559  	}
   560  	worker := newStorageProvisioner(c, args)
   561  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   562  	defer worker.Kill()
   563  
   564  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   565  		MachineTag: "machine-1", AttachmentTag: "volume-1",
   566  	}, {
   567  		MachineTag: "machine-1", AttachmentTag: "volume-2",
   568  	}}
   569  	volumeAccessor.volumesWatcher.changes <- []string{"1"}
   570  	waitChannel(c, validated, "waiting for volume parameter validation")
   571  	assertNoEvent(c, createdVolumes, "volume created")
   572  	c.Assert(validateCalls, gc.Equals, 1)
   573  
   574  	// Failure to create volume-1 should not block creation of volume-2.
   575  	volumeAccessor.volumesWatcher.changes <- []string{"2"}
   576  	waitChannel(c, validated, "waiting for volume parameter validation")
   577  	createVolumeParams := waitChannel(c, createdVolumes, "volume created").([]storage.VolumeParams)
   578  	c.Assert(createVolumeParams, gc.HasLen, 1)
   579  	c.Assert(createVolumeParams[0].Tag.String(), gc.Equals, "volume-2")
   580  	c.Assert(validateCalls, gc.Equals, 2)
   581  
   582  	volumeAccessor.volumesWatcher.changes <- []string{"3"}
   583  	waitChannel(c, validated, "waiting for volume parameter validation")
   584  	assertNoEvent(c, destroyedVolumes, "volume destroyed")
   585  	c.Assert(validateCalls, gc.Equals, 3)
   586  
   587  	// Failure to destroy volume-3 should not block destruction of volume-4.
   588  	volumeAccessor.volumesWatcher.changes <- []string{"4"}
   589  	waitChannel(c, validated, "waiting for volume parameter validation")
   590  	destroyVolumeParams := waitChannel(c, destroyedVolumes, "volume destroyed").([]string)
   591  	c.Assert(destroyVolumeParams, jc.DeepEquals, []string{"vol-ume"})
   592  	c.Assert(validateCalls, gc.Equals, 4)
   593  
   594  	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
   595  		{Tag: "volume-1", Status: "error", Info: "something is wrong"},
   596  		{Tag: "volume-2", Status: "attaching"},
   597  		{Tag: "volume-3", Status: "error", Info: "something is wrong"},
   598  		// destroyed volumes are removed immediately,
   599  		// so there is no status update.
   600  	})
   601  }
   602  
   603  func (s *storageProvisionerSuite) TestValidateFilesystemParams(c *gc.C) {
   604  	filesystemAccessor := newMockFilesystemAccessor()
   605  	filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   606  	filesystemAccessor.provisionedFilesystems["filesystem-3"] = params.Filesystem{FilesystemTag: "filesystem-3"}
   607  	filesystemAccessor.provisionedFilesystems["filesystem-4"] = params.Filesystem{
   608  		FilesystemTag: "filesystem-4",
   609  		Info:          params.FilesystemInfo{FilesystemId: "fs-id"},
   610  	}
   611  
   612  	var validateCalls int
   613  	validated := make(chan interface{}, 1)
   614  	s.provider.validateFilesystemParamsFunc = func(p storage.FilesystemParams) error {
   615  		validateCalls++
   616  		validated <- p
   617  		switch p.Tag.String() {
   618  		case "filesystem-1", "filesystem-3":
   619  			return errors.New("something is wrong")
   620  		}
   621  		return nil
   622  	}
   623  
   624  	life := func(tags []names.Tag) ([]params.LifeResult, error) {
   625  		results := make([]params.LifeResult, len(tags))
   626  		for i := range results {
   627  			switch tags[i].String() {
   628  			case "filesystem-3", "filesystem-4":
   629  				results[i].Life = params.Dead
   630  			default:
   631  				results[i].Life = params.Alive
   632  			}
   633  		}
   634  		return results, nil
   635  	}
   636  
   637  	createdFilesystems := make(chan interface{}, 1)
   638  	s.provider.createFilesystemsFunc = func(args []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) {
   639  		createdFilesystems <- args
   640  		if len(args) != 1 {
   641  			return nil, errors.New("expected one argument")
   642  		}
   643  		return []storage.CreateFilesystemsResult{{
   644  			Filesystem: &storage.Filesystem{Tag: args[0].Tag},
   645  		}}, nil
   646  	}
   647  
   648  	destroyedFilesystems := make(chan interface{}, 1)
   649  	s.provider.destroyFilesystemsFunc = func(filesystemIds []string) ([]error, error) {
   650  		destroyedFilesystems <- filesystemIds
   651  		return make([]error, len(filesystemIds)), nil
   652  	}
   653  
   654  	args := &workerArgs{
   655  		filesystems: filesystemAccessor,
   656  		life: &mockLifecycleManager{
   657  			life: life,
   658  		},
   659  		registry: s.registry,
   660  	}
   661  	worker := newStorageProvisioner(c, args)
   662  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   663  	defer worker.Kill()
   664  
   665  	filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   666  		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
   667  	}, {
   668  		MachineTag: "machine-1", AttachmentTag: "filesystem-2",
   669  	}}
   670  	filesystemAccessor.filesystemsWatcher.changes <- []string{"1"}
   671  	waitChannel(c, validated, "waiting for filesystem parameter validation")
   672  	assertNoEvent(c, createdFilesystems, "filesystem created")
   673  	c.Assert(validateCalls, gc.Equals, 1)
   674  
   675  	// Failure to create filesystem-1 should not block creation of filesystem-2.
   676  	filesystemAccessor.filesystemsWatcher.changes <- []string{"2"}
   677  	waitChannel(c, validated, "waiting for filesystem parameter validation")
   678  	createFilesystemParams := waitChannel(c, createdFilesystems, "filesystem created").([]storage.FilesystemParams)
   679  	c.Assert(createFilesystemParams, gc.HasLen, 1)
   680  	c.Assert(createFilesystemParams[0].Tag.String(), gc.Equals, "filesystem-2")
   681  	c.Assert(validateCalls, gc.Equals, 2)
   682  
   683  	filesystemAccessor.filesystemsWatcher.changes <- []string{"3"}
   684  	waitChannel(c, validated, "waiting for filesystem parameter validation")
   685  	assertNoEvent(c, destroyedFilesystems, "filesystem destroyed")
   686  	c.Assert(validateCalls, gc.Equals, 3)
   687  
   688  	// Failure to destroy filesystem-3 should not block destruction of filesystem-4.
   689  	filesystemAccessor.filesystemsWatcher.changes <- []string{"4"}
   690  	waitChannel(c, validated, "waiting for filesystem parameter validation")
   691  	destroyFilesystemParams := waitChannel(c, destroyedFilesystems, "filesystem destroyed").([]string)
   692  	c.Assert(destroyFilesystemParams, jc.DeepEquals, []string{"fs-id"})
   693  	c.Assert(validateCalls, gc.Equals, 4)
   694  
   695  	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
   696  		{Tag: "filesystem-1", Status: "error", Info: "something is wrong"},
   697  		{Tag: "filesystem-2", Status: "attaching"},
   698  		{Tag: "filesystem-3", Status: "error", Info: "something is wrong"},
   699  		// destroyed filesystems are removed immediately,
   700  		// so there is no status update.
   701  	})
   702  }
   703  
   704  func (s *storageProvisionerSuite) TestFilesystemAdded(c *gc.C) {
   705  	expectedFilesystems := []params.Filesystem{{
   706  		FilesystemTag: "filesystem-1",
   707  		Info: params.FilesystemInfo{
   708  			FilesystemId: "id-1",
   709  			Size:         1024,
   710  		},
   711  	}, {
   712  		FilesystemTag: "filesystem-2",
   713  		Info: params.FilesystemInfo{
   714  			FilesystemId: "id-2",
   715  			Size:         1024,
   716  		},
   717  	}}
   718  
   719  	filesystemInfoSet := make(chan interface{})
   720  	filesystemAccessor := newMockFilesystemAccessor()
   721  	filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
   722  		defer close(filesystemInfoSet)
   723  		c.Assert(filesystems, jc.SameContents, expectedFilesystems)
   724  		return nil, nil
   725  	}
   726  
   727  	args := &workerArgs{filesystems: filesystemAccessor, registry: s.registry}
   728  	worker := newStorageProvisioner(c, args)
   729  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   730  	defer worker.Kill()
   731  
   732  	// The worker should create filesystems according to ids "1" and "2".
   733  	filesystemAccessor.filesystemsWatcher.changes <- []string{"1", "2"}
   734  	waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set")
   735  }
   736  
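        // TestVolumeNeedsInstance checks that provisioning blocks until the machine
        // has an instance ID. needsInstanceVolumeId is assumed to be a constant from
        // the mocks naming a volume whose parameters depend on the machine instance.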
   737  func (s *storageProvisionerSuite) TestVolumeNeedsInstance(c *gc.C) {
   738  	volumeInfoSet := make(chan interface{})
   739  	volumeAccessor := newMockVolumeAccessor()
   740  	volumeAccessor.setVolumeInfo = func([]params.Volume) ([]params.ErrorResult, error) {
   741  		defer close(volumeInfoSet)
   742  		return nil, nil
   743  	}
   744  	volumeAccessor.setVolumeAttachmentInfo = func([]params.VolumeAttachment) ([]params.ErrorResult, error) {
   745  		return nil, nil
   746  	}
   747  
   748  	args := &workerArgs{volumes: volumeAccessor, registry: s.registry}
   749  	worker := newStorageProvisioner(c, args)
   750  	defer worker.Wait()
   751  	defer worker.Kill()
   752  
   753  	volumeAccessor.volumesWatcher.changes <- []string{needsInstanceVolumeId}
   754  	assertNoEvent(c, volumeInfoSet, "volume info set")
   755  	args.machines.instanceIds[names.NewMachineTag("1")] = "inst-id"
   756  	args.machines.watcher.changes <- struct{}{}
   757  	waitChannel(c, volumeInfoSet, "waiting for volume info to be set")
   758  }
   759  
   760  func (s *storageProvisionerSuite) TestVolumeNonDynamic(c *gc.C) {
   761  	volumeInfoSet := make(chan interface{})
   762  	volumeAccessor := newMockVolumeAccessor()
   763  	volumeAccessor.setVolumeInfo = func([]params.Volume) ([]params.ErrorResult, error) {
   764  		defer close(volumeInfoSet)
   765  		return nil, nil
   766  	}
   767  
   768  	args := &workerArgs{volumes: volumeAccessor, registry: s.registry}
   769  	worker := newStorageProvisioner(c, args)
   770  	defer worker.Wait()
   771  	defer worker.Kill()
   772  
   773  	// Volumes for non-dynamic providers should not be created.
   774  	s.provider.dynamic = false
   775  	volumeAccessor.volumesWatcher.changes <- []string{"1"}
   776  	assertNoEvent(c, volumeInfoSet, "volume info set")
   777  }
   778  
   779  func (s *storageProvisionerSuite) TestVolumeAttachmentAdded(c *gc.C) {
   780  	// We should get two volume attachments:
   781  	//   - volume-1 to machine-1, because the volume and
   782  	//     machine are provisioned, but the attachment is not.
   783  	//   - volume-1 to machine-0, because the volume,
   784  	//     machine, and attachment are provisioned, but
   785  	//     in a previous session, so a reattachment is
   786  	//     requested.
   787  	expectedVolumeAttachments := []params.VolumeAttachment{{
   788  		VolumeTag:  "volume-1",
   789  		MachineTag: "machine-1",
   790  		Info: params.VolumeAttachmentInfo{
   791  			DeviceName: "/dev/sda1",
   792  			ReadOnly:   true,
   793  		},
   794  	}, {
   795  		VolumeTag:  "volume-1",
   796  		MachineTag: "machine-0",
   797  		Info: params.VolumeAttachmentInfo{
   798  			DeviceName: "/dev/sda1",
   799  			ReadOnly:   true,
   800  		},
   801  	}}
   802  
   803  	var allVolumeAttachments []params.VolumeAttachment
   804  	volumeAttachmentInfoSet := make(chan interface{})
   805  	volumeAccessor := newMockVolumeAccessor()
   806  	volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) {
   807  		allVolumeAttachments = append(allVolumeAttachments, volumeAttachments...)
   808  		volumeAttachmentInfoSet <- nil
   809  		return make([]params.ErrorResult, len(volumeAttachments)), nil
   810  	}
   811  
   812  	// volume-1, machine-0, and machine-1 are provisioned.
   813  	volumeAccessor.provisionedVolumes["volume-1"] = params.Volume{
   814  		VolumeTag: "volume-1",
   815  		Info: params.VolumeInfo{
   816  			VolumeId: "vol-123",
   817  		},
   818  	}
   819  	volumeAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0")
   820  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   821  
   822  	// machine-0/volume-1 attachment is already created.
   823  	// We should see a reattachment.
   824  	alreadyAttached := params.MachineStorageId{
   825  		MachineTag:    "machine-0",
   826  		AttachmentTag: "volume-1",
   827  	}
   828  	volumeAccessor.provisionedAttachments[alreadyAttached] = params.VolumeAttachment{
   829  		MachineTag: "machine-0",
   830  		VolumeTag:  "volume-1",
   831  	}
   832  
   833  	args := &workerArgs{volumes: volumeAccessor, registry: s.registry}
   834  	worker := newStorageProvisioner(c, args)
   835  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   836  	defer worker.Kill()
   837  
   838  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   839  		MachineTag: "machine-1", AttachmentTag: "volume-1",
   840  	}, {
   841  		MachineTag: "machine-1", AttachmentTag: "volume-2",
   842  	}, {
   843  		MachineTag: "machine-2", AttachmentTag: "volume-1",
   844  	}, {
   845  		MachineTag: "machine-0", AttachmentTag: "volume-1",
   846  	}}
   847  	assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set")
   848  	volumeAccessor.volumesWatcher.changes <- []string{"1"}
   849  	waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set")
   850  	c.Assert(allVolumeAttachments, jc.SameContents, expectedVolumeAttachments)
   851  
   852  	// Reattachment should only happen once per session.
   853  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   854  		MachineTag:    "machine-0",
   855  		AttachmentTag: "volume-1",
   856  	}}
   857  	assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set")
   858  }
   859  
   860  func (s *storageProvisionerSuite) TestFilesystemAttachmentAdded(c *gc.C) {
   861  	// We should get two filesystem attachments, because these are the
   862  	// only combinations where both the machine and the filesystem are
   863  	// already provisioned and an attachment (or reattachment) is
   864  	// required:
   865  	//   - filesystem-1 to machine-1, because the filesystem and
   866  	//     machine are provisioned, but the attachment is not.
   867  	//   - filesystem-1 to machine-0, because the filesystem,
   868  	//     machine, and attachment are provisioned, but in a
   869  	//     previous session, so a reattachment is requested.
   870  	expectedFilesystemAttachments := []params.FilesystemAttachment{{
   871  		FilesystemTag: "filesystem-1",
   872  		MachineTag:    "machine-1",
   873  		Info: params.FilesystemAttachmentInfo{
   874  			MountPoint: "/srv/fs-123",
   875  		},
   876  	}, {
   877  		FilesystemTag: "filesystem-1",
   878  		MachineTag:    "machine-0",
   879  		Info: params.FilesystemAttachmentInfo{
   880  			MountPoint: "/srv/fs-123",
   881  		},
   882  	}}
   883  
   884  	var allFilesystemAttachments []params.FilesystemAttachment
   885  	filesystemAttachmentInfoSet := make(chan interface{})
   886  	filesystemAccessor := newMockFilesystemAccessor()
   887  	filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) {
   888  		allFilesystemAttachments = append(allFilesystemAttachments, filesystemAttachments...)
   889  		filesystemAttachmentInfoSet <- nil
   890  		return make([]params.ErrorResult, len(filesystemAttachments)), nil
   891  	}
   892  
   893  	// filesystem-1, machine-0, and machine-1 are provisioned.
   894  	filesystemAccessor.provisionedFilesystems["filesystem-1"] = params.Filesystem{
   895  		FilesystemTag: "filesystem-1",
   896  		Info: params.FilesystemInfo{
   897  			FilesystemId: "fs-123",
   898  		},
   899  	}
   900  	filesystemAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0")
   901  	filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
   902  
   903  	// machine-0/filesystem-1 attachment is already created.
   904  	// We should see a reattachment.
   905  	alreadyAttached := params.MachineStorageId{
   906  		MachineTag:    "machine-0",
   907  		AttachmentTag: "filesystem-1",
   908  	}
   909  	filesystemAccessor.provisionedAttachments[alreadyAttached] = params.FilesystemAttachment{
   910  		MachineTag:    "machine-0",
   911  		FilesystemTag: "filesystem-1",
   912  	}
   913  
   914  	args := &workerArgs{filesystems: filesystemAccessor, registry: s.registry}
   915  	worker := newStorageProvisioner(c, args)
   916  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   917  	defer worker.Kill()
   918  
   919  	filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   920  		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
   921  	}, {
   922  		MachineTag: "machine-1", AttachmentTag: "filesystem-2",
   923  	}, {
   924  		MachineTag: "machine-2", AttachmentTag: "filesystem-1",
   925  	}, {
   926  		MachineTag: "machine-0", AttachmentTag: "filesystem-1",
   927  	}}
   928  	assertNoEvent(c, filesystemAttachmentInfoSet, "filesystem attachment info set")
   929  	filesystemAccessor.filesystemsWatcher.changes <- []string{"1"}
   930  	waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set")
   931  	c.Assert(allFilesystemAttachments, jc.SameContents, expectedFilesystemAttachments)
   932  
   933  	// Reattachment should only happen once per session.
   934  	filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
   935  		MachineTag:    "machine-0",
   936  		AttachmentTag: "filesystem-1",
   937  	}}
   938  	assertNoEvent(c, filesystemAttachmentInfoSet, "filesystem attachment info set")
   939  }
   940  
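        // TestCreateVolumeBackedFilesystem checks that a machine-scoped provisioner
        // creates a volume-backed filesystem only once its backing block device is
        // attached. The reported filesystem IDs and sizes mirror the block devices,
        // which is assumed to be the behaviour of the patched managed filesystem
        // source.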
   941  func (s *storageProvisionerSuite) TestCreateVolumeBackedFilesystem(c *gc.C) {
   942  	filesystemInfoSet := make(chan interface{})
   943  	filesystemAccessor := newMockFilesystemAccessor()
   944  	filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
   945  		filesystemInfoSet <- filesystems
   946  		return nil, nil
   947  	}
   948  
   949  	args := &workerArgs{
   950  		scope:       names.NewMachineTag("0"),
   951  		filesystems: filesystemAccessor,
   952  		registry:    s.registry,
   953  	}
   954  	worker := newStorageProvisioner(c, args)
   955  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
   956  	defer worker.Kill()
   957  
   958  	args.volumes.blockDevices[params.MachineStorageId{
   959  		MachineTag:    "machine-0",
   960  		AttachmentTag: "volume-0-0",
   961  	}] = storage.BlockDevice{
   962  		DeviceName: "xvdf1",
   963  		Size:       123,
   964  	}
   965  	filesystemAccessor.filesystemsWatcher.changes <- []string{"0/0", "0/1"}
   966  
   967  	// Only the block device for volume 0/0 is attached at the moment,
   968  	// so only the corresponding filesystem will be created.
   969  	filesystemInfo := waitChannel(
   970  		c, filesystemInfoSet,
   971  		"waiting for filesystem info to be set",
   972  	).([]params.Filesystem)
   973  	c.Assert(filesystemInfo, jc.DeepEquals, []params.Filesystem{{
   974  		FilesystemTag: "filesystem-0-0",
   975  		Info: params.FilesystemInfo{
   976  			FilesystemId: "xvdf1",
   977  			Size:         123,
   978  		},
   979  	}})
   980  
   981  	// If we now attach the block device for volume 0/1 and trigger the
   982  	// notification, then the storage provisioner will wake up and create
   983  	// the filesystem.
   984  	args.volumes.blockDevices[params.MachineStorageId{
   985  		MachineTag:    "machine-0",
   986  		AttachmentTag: "volume-0-1",
   987  	}] = storage.BlockDevice{
   988  		DeviceName: "xvdf2",
   989  		Size:       246,
   990  	}
   991  	args.volumes.blockDevicesWatcher.changes <- struct{}{}
   992  	filesystemInfo = waitChannel(
   993  		c, filesystemInfoSet,
   994  		"waiting for filesystem info to be set",
   995  	).([]params.Filesystem)
   996  	c.Assert(filesystemInfo, jc.DeepEquals, []params.Filesystem{{
   997  		FilesystemTag: "filesystem-0-1",
   998  		Info: params.FilesystemInfo{
   999  			FilesystemId: "xvdf2",
  1000  			Size:         246,
  1001  		},
  1002  	}})
  1003  }
  1004  
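        // TestAttachVolumeBackedFilesystem checks that a volume-backed filesystem
        // is mounted from its block device; the /mnt/xvdf1 mount point and read-only
        // flag are assumed to be produced by the managed filesystem source mock.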
  1005  func (s *storageProvisionerSuite) TestAttachVolumeBackedFilesystem(c *gc.C) {
  1006  	infoSet := make(chan interface{})
  1007  	filesystemAccessor := newMockFilesystemAccessor()
  1008  	filesystemAccessor.setFilesystemAttachmentInfo = func(attachments []params.FilesystemAttachment) ([]params.ErrorResult, error) {
  1009  		infoSet <- attachments
  1010  		return nil, nil
  1011  	}
  1012  
  1013  	args := &workerArgs{
  1014  		scope:       names.NewMachineTag("0"),
  1015  		filesystems: filesystemAccessor,
  1016  		registry:    s.registry,
  1017  	}
  1018  	worker := newStorageProvisioner(c, args)
  1019  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1020  	defer worker.Kill()
  1021  
  1022  	filesystemAccessor.provisionedFilesystems["filesystem-0-0"] = params.Filesystem{
  1023  		FilesystemTag: "filesystem-0-0",
  1024  		VolumeTag:     "volume-0-0",
  1025  		Info: params.FilesystemInfo{
  1026  			FilesystemId: "whatever",
  1027  			Size:         123,
  1028  		},
  1029  	}
  1030  	filesystemAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0")
  1031  
  1032  	args.volumes.blockDevices[params.MachineStorageId{
  1033  		MachineTag:    "machine-0",
  1034  		AttachmentTag: "volume-0-0",
  1035  	}] = storage.BlockDevice{
  1036  		DeviceName: "xvdf1",
  1037  		Size:       123,
  1038  	}
  1039  	filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
  1040  		MachineTag:    "machine-0",
  1041  		AttachmentTag: "filesystem-0-0",
  1042  	}}
  1043  	filesystemAccessor.filesystemsWatcher.changes <- []string{"0/0"}
  1044  
  1045  	info := waitChannel(
  1046  		c, infoSet, "waiting for filesystem attachment info to be set",
  1047  	).([]params.FilesystemAttachment)
  1048  	c.Assert(info, jc.DeepEquals, []params.FilesystemAttachment{{
  1049  		FilesystemTag: "filesystem-0-0",
  1050  		MachineTag:    "machine-0",
  1051  		Info: params.FilesystemAttachmentInfo{
  1052  			MountPoint: "/mnt/xvdf1",
  1053  			ReadOnly:   true,
  1054  		},
  1055  	}})
  1056  }
  1057  
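        // TestResourceTags checks that resource tags are passed through to the
        // provider's CreateVolumes and CreateFilesystems calls; the "very": "fancy"
        // tags are assumed to come from the parameters returned by the mock
        // accessors.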
  1058  func (s *storageProvisionerSuite) TestResourceTags(c *gc.C) {
  1059  	volumeInfoSet := make(chan interface{})
  1060  	volumeAccessor := newMockVolumeAccessor()
  1061  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
  1062  	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
  1063  		defer close(volumeInfoSet)
  1064  		return nil, nil
  1065  	}
  1066  
  1067  	filesystemInfoSet := make(chan interface{})
  1068  	filesystemAccessor := newMockFilesystemAccessor()
  1069  	filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
  1070  	filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
  1071  		defer close(filesystemInfoSet)
  1072  		return nil, nil
  1073  	}
  1074  
  1075  	var volumeSource dummyVolumeSource
  1076  	s.provider.volumeSourceFunc = func(sourceConfig *storage.Config) (storage.VolumeSource, error) {
  1077  		return &volumeSource, nil
  1078  	}
  1079  
  1080  	var filesystemSource dummyFilesystemSource
  1081  	s.provider.filesystemSourceFunc = func(sourceConfig *storage.Config) (storage.FilesystemSource, error) {
  1082  		return &filesystemSource, nil
  1083  	}
  1084  
  1085  	args := &workerArgs{
  1086  		volumes:     volumeAccessor,
  1087  		filesystems: filesystemAccessor,
  1088  		registry:    s.registry,
  1089  	}
  1090  	worker := newStorageProvisioner(c, args)
  1091  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1092  	defer worker.Kill()
  1093  
  1094  	volumeAccessor.volumesWatcher.changes <- []string{"1"}
  1095  	filesystemAccessor.filesystemsWatcher.changes <- []string{"1"}
  1096  	waitChannel(c, volumeInfoSet, "waiting for volume info to be set")
  1097  	waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set")
  1098  	c.Assert(volumeSource.createVolumesArgs, jc.DeepEquals, [][]storage.VolumeParams{{{
  1099  		Tag:          names.NewVolumeTag("1"),
  1100  		Size:         1024,
  1101  		Provider:     "dummy",
  1102  		Attributes:   map[string]interface{}{"persistent": true},
  1103  		ResourceTags: map[string]string{"very": "fancy"},
  1104  		Attachment: &storage.VolumeAttachmentParams{
  1105  			Volume: names.NewVolumeTag("1"),
  1106  			AttachmentParams: storage.AttachmentParams{
  1107  				Machine:    names.NewMachineTag("1"),
  1108  				Provider:   "dummy",
  1109  				InstanceId: "already-provisioned-1",
  1110  				ReadOnly:   true,
  1111  			},
  1112  		},
  1113  	}}})
  1114  	c.Assert(filesystemSource.createFilesystemsArgs, jc.DeepEquals, [][]storage.FilesystemParams{{{
  1115  		Tag:          names.NewFilesystemTag("1"),
  1116  		Size:         1024,
  1117  		Provider:     "dummy",
  1118  		ResourceTags: map[string]string{"very": "fancy"},
  1119  	}}})
  1120  }
  1121  
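        // TestSetVolumeInfoErrorStopsWorker checks that an API-level error from
        // SetVolumeInfo is fatal to the worker, while the companion test below
        // checks that a per-volume error result leaves the worker running.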
  1122  func (s *storageProvisionerSuite) TestSetVolumeInfoErrorStopsWorker(c *gc.C) {
  1123  	volumeAccessor := newMockVolumeAccessor()
  1124  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
  1125  	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
  1126  		return nil, errors.New("belly up")
  1127  	}
  1128  
  1129  	args := &workerArgs{volumes: volumeAccessor, registry: s.registry}
  1130  	worker := newStorageProvisioner(c, args)
  1131  	defer worker.Wait()
  1132  	defer worker.Kill()
  1133  
  1134  	done := make(chan interface{})
  1135  	go func() {
  1136  		defer close(done)
  1137  		err := worker.Wait()
  1138  		c.Assert(err, gc.ErrorMatches, "creating volumes: publishing volumes to state: belly up")
  1139  	}()
  1140  
  1141  	args.volumes.volumesWatcher.changes <- []string{"1"}
  1142  	waitChannel(c, done, "waiting for worker to exit")
  1143  }
  1144  
  1145  func (s *storageProvisionerSuite) TestSetVolumeInfoErrorResultDoesNotStopWorker(c *gc.C) {
  1146  	volumeAccessor := newMockVolumeAccessor()
  1147  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
  1148  	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
  1149  		return []params.ErrorResult{{Error: &params.Error{Message: "message", Code: "code"}}}, nil
  1150  	}
  1151  
  1152  	args := &workerArgs{volumes: volumeAccessor, registry: s.registry}
  1153  	worker := newStorageProvisioner(c, args)
  1154  	defer func() {
  1155  		err := worker.Wait()
  1156  		c.Assert(err, jc.ErrorIsNil)
  1157  	}()
  1158  	defer worker.Kill()
  1159  
  1160  	done := make(chan interface{})
  1161  	go func() {
  1162  		defer close(done)
  1163  		worker.Wait()
  1164  	}()
  1165  
  1166  	args.volumes.volumesWatcher.changes <- []string{"1"}
  1167  	assertNoEvent(c, done, "worker exited")
  1168  }
  1169  
  1170  func (s *storageProvisionerSuite) TestDetachVolumesUnattached(c *gc.C) {
  1171  	removed := make(chan interface{})
  1172  	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
  1173  		defer close(removed)
  1174  		c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{
  1175  			MachineTag:    "machine-0",
  1176  			AttachmentTag: "volume-0",
  1177  		}})
  1178  		return make([]params.ErrorResult, len(ids)), nil
  1179  	}
  1180  
  1181  	args := &workerArgs{
  1182  		life:     &mockLifecycleManager{removeAttachments: removeAttachments},
  1183  		registry: s.registry,
  1184  	}
  1185  	worker := newStorageProvisioner(c, args)
  1186  	defer worker.Wait()
  1187  	defer worker.Kill()
  1188  
  1189  	args.volumes.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
  1190  		MachineTag: "machine-0", AttachmentTag: "volume-0",
  1191  	}}
  1192  	waitChannel(c, removed, "waiting for attachment to be removed")
  1193  }
  1194  
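        // TestDetachVolumes checks that once an attachment has been made and its
        // life is then reported as Dying, the provisioner detaches the volume from
        // the machine and then removes the attachment record.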
  1195  func (s *storageProvisionerSuite) TestDetachVolumes(c *gc.C) {
  1196  	var attached bool
  1197  	volumeAttachmentInfoSet := make(chan interface{})
  1198  	volumeAccessor := newMockVolumeAccessor()
  1199  	volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) {
  1200  		close(volumeAttachmentInfoSet)
  1201  		attached = true
  1202  		for _, a := range volumeAttachments {
  1203  			id := params.MachineStorageId{
  1204  				MachineTag:    a.MachineTag,
  1205  				AttachmentTag: a.VolumeTag,
  1206  			}
  1207  			volumeAccessor.provisionedAttachments[id] = a
  1208  		}
  1209  		return make([]params.ErrorResult, len(volumeAttachments)), nil
  1210  	}
  1211  
  1212  	expectedAttachmentIds := []params.MachineStorageId{{
  1213  		MachineTag: "machine-1", AttachmentTag: "volume-1",
  1214  	}}
  1215  
  1216  	attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) {
  1217  		c.Assert(ids, gc.DeepEquals, expectedAttachmentIds)
  1218  		life := params.Alive
  1219  		if attached {
  1220  			life = params.Dying
  1221  		}
  1222  		return []params.LifeResult{{Life: life}}, nil
  1223  	}
  1224  
  1225  	detached := make(chan interface{})
  1226  	s.provider.detachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]error, error) {
  1227  		c.Assert(args, gc.HasLen, 1)
  1228  		c.Assert(args[0].Machine.String(), gc.Equals, expectedAttachmentIds[0].MachineTag)
  1229  		c.Assert(args[0].Volume.String(), gc.Equals, expectedAttachmentIds[0].AttachmentTag)
  1230  		defer close(detached)
  1231  		return make([]error, len(args)), nil
  1232  	}
  1233  
  1234  	removed := make(chan interface{})
  1235  	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
  1236  		c.Assert(ids, gc.DeepEquals, expectedAttachmentIds)
  1237  		close(removed)
  1238  		return make([]params.ErrorResult, len(ids)), nil
  1239  	}
  1240  
  1241  	// volume-1 and machine-1 are provisioned.
  1242  	volumeAccessor.provisionedVolumes["volume-1"] = params.Volume{
  1243  		VolumeTag: "volume-1",
  1244  		Info: params.VolumeInfo{
  1245  			VolumeId: "vol-123",
  1246  		},
  1247  	}
  1248  	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
  1249  
  1250  	args := &workerArgs{
  1251  		volumes: volumeAccessor,
  1252  		life: &mockLifecycleManager{
  1253  			attachmentLife:    attachmentLife,
  1254  			removeAttachments: removeAttachments,
  1255  		},
  1256  		registry: s.registry,
  1257  	}
  1258  	worker := newStorageProvisioner(c, args)
  1259  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1260  	defer worker.Kill()
  1261  
  1262  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
  1263  		MachineTag: "machine-1", AttachmentTag: "volume-1",
  1264  	}}
  1265  	volumeAccessor.volumesWatcher.changes <- []string{"1"}
  1266  	waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set")
  1267  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
  1268  		MachineTag: "machine-1", AttachmentTag: "volume-1",
  1269  	}}
  1270  	waitChannel(c, detached, "waiting for volume to be detached")
  1271  	waitChannel(c, removed, "waiting for attachment to be removed")
  1272  }
  1273  
  1274  func (s *storageProvisionerSuite) TestDetachVolumesRetry(c *gc.C) {
  1275  	machine := names.NewMachineTag("1")
  1276  	volume := names.NewVolumeTag("1")
  1277  	attachmentId := params.MachineStorageId{
  1278  		MachineTag:    machine.String(),
  1279  		AttachmentTag: volume.String(),
  1280  	}
  1281  	volumeAccessor := newMockVolumeAccessor()
  1282  	volumeAccessor.provisionedAttachments[attachmentId] = params.VolumeAttachment{
  1283  		MachineTag: machine.String(),
  1284  		VolumeTag:  volume.String(),
  1285  	}
  1286  	volumeAccessor.provisionedVolumes[volume.String()] = params.Volume{
  1287  		VolumeTag: volume.String(),
  1288  		Info: params.VolumeInfo{
  1289  			VolumeId: "vol-123",
  1290  		},
  1291  	}
  1292  	volumeAccessor.provisionedMachines[machine.String()] = instance.Id("already-provisioned-1")
  1293  
  1294  	attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) {
  1295  		return []params.LifeResult{{Life: params.Dying}}, nil
  1296  	}
  1297  
  1298  	// mockClock's After advances the current time by the specified
  1299  	// duration and signals the channel immediately.
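        	//
        	// A minimal sketch of the behaviour assumed of the mock clock (the
        	// real test double lives elsewhere in this package and may differ
        	// in detail):
        	//
        	//	func (c *mockClock) After(d time.Duration) <-chan time.Time {
        	//		c.now = c.now.Add(d)
        	//		ch := make(chan time.Time, 1)
        	//		ch <- c.now
        	//		return ch
        	//	}
        	//
        	// That is what allows the backoff delays to be derived from
        	// successive clock.Now() values below without any real waiting.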
  1300  	clock := &mockClock{}
  1301  	var detachVolumeTimes []time.Time
  1302  
  1303  	s.provider.detachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]error, error) {
  1304  		detachVolumeTimes = append(detachVolumeTimes, clock.Now())
  1305  		if len(detachVolumeTimes) < 10 {
  1306  			return []error{errors.New("badness")}, nil
  1307  		}
  1308  		return []error{nil}, nil
  1309  	}
  1310  
  1311  	removed := make(chan interface{})
  1312  	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
  1313  		close(removed)
  1314  		return make([]params.ErrorResult, len(ids)), nil
  1315  	}
  1316  
  1317  	args := &workerArgs{
  1318  		volumes: volumeAccessor,
  1319  		clock:   clock,
  1320  		life: &mockLifecycleManager{
  1321  			attachmentLife:    attachmentLife,
  1322  			removeAttachments: removeAttachments,
  1323  		},
  1324  		registry: s.registry,
  1325  	}
  1326  	worker := newStorageProvisioner(c, args)
  1327  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1328  	defer worker.Kill()
  1329  
  1330  	volumeAccessor.volumesWatcher.changes <- []string{volume.Id()}
  1331  	volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
  1332  		MachineTag:    machine.String(),
  1333  		AttachmentTag: volume.String(),
  1334  	}}
  1335  	waitChannel(c, removed, "waiting for attachment to be removed")
  1336  	c.Assert(detachVolumeTimes, gc.HasLen, 10)
  1337  
  1338  	// The first attempt should have been immediate: T0.
  1339  	c.Assert(detachVolumeTimes[0], gc.Equals, time.Time{})
  1340  
  1341  	delays := make([]time.Duration, len(detachVolumeTimes)-1)
  1342  	for i := range detachVolumeTimes[1:] {
  1343  		delays[i] = detachVolumeTimes[i+1].Sub(detachVolumeTimes[i])
  1344  	}
  1345  	c.Assert(delays, jc.DeepEquals, []time.Duration{
  1346  		30 * time.Second,
  1347  		1 * time.Minute,
  1348  		2 * time.Minute,
  1349  		4 * time.Minute,
  1350  		8 * time.Minute,
  1351  		16 * time.Minute,
  1352  		30 * time.Minute, // ceiling reached
  1353  		30 * time.Minute,
  1354  		30 * time.Minute,
  1355  	})
  1356  
  1357  	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
  1358  		{Tag: "volume-1", Status: "detaching", Info: "badness"}, // DetachVolumes
  1359  		{Tag: "volume-1", Status: "detaching", Info: "badness"},
  1360  		{Tag: "volume-1", Status: "detaching", Info: "badness"},
  1361  		{Tag: "volume-1", Status: "detaching", Info: "badness"},
  1362  		{Tag: "volume-1", Status: "detaching", Info: "badness"},
  1363  		{Tag: "volume-1", Status: "detaching", Info: "badness"},
  1364  		{Tag: "volume-1", Status: "detaching", Info: "badness"},
  1365  		{Tag: "volume-1", Status: "detaching", Info: "badness"},
  1366  		{Tag: "volume-1", Status: "detaching", Info: "badness"},
  1367  		{Tag: "volume-1", Status: "detached", Info: ""},
  1368  	})
  1369  }
  1370  
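        // TestDetachFilesystemsUnattached checks that an attachment referring
        // to an unprovisioned filesystem is simply removed.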
  1371  func (s *storageProvisionerSuite) TestDetachFilesystemsUnattached(c *gc.C) {
  1372  	removed := make(chan interface{})
  1373  	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
  1374  		defer close(removed)
  1375  		c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{
  1376  			MachineTag:    "machine-0",
  1377  			AttachmentTag: "filesystem-0",
  1378  		}})
  1379  		return make([]params.ErrorResult, len(ids)), nil
  1380  	}
  1381  
  1382  	args := &workerArgs{
  1383  		life:     &mockLifecycleManager{removeAttachments: removeAttachments},
  1384  		registry: s.registry,
  1385  	}
  1386  	worker := newStorageProvisioner(c, args)
  1387  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1388  	defer worker.Kill()
  1389  
  1390  	args.filesystems.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
  1391  		MachineTag: "machine-0", AttachmentTag: "filesystem-0",
  1392  	}}
  1393  	waitChannel(c, removed, "waiting for attachment to be removed")
  1394  }
  1395  
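        // TestDetachFilesystems is the filesystem counterpart of
        // TestDetachVolumes: once the attachment is reported as Dying, the
        // worker detaches the filesystem and removes the attachment.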
  1396  func (s *storageProvisionerSuite) TestDetachFilesystems(c *gc.C) {
  1397  	var attached bool
  1398  	filesystemAttachmentInfoSet := make(chan interface{})
  1399  	filesystemAccessor := newMockFilesystemAccessor()
  1400  	filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) {
  1401  		close(filesystemAttachmentInfoSet)
  1402  		attached = true
  1403  		for _, a := range filesystemAttachments {
  1404  			id := params.MachineStorageId{
  1405  				MachineTag:    a.MachineTag,
  1406  				AttachmentTag: a.FilesystemTag,
  1407  			}
  1408  			filesystemAccessor.provisionedAttachments[id] = a
  1409  		}
  1410  		return make([]params.ErrorResult, len(filesystemAttachments)), nil
  1411  	}
  1412  
  1413  	expectedAttachmentIds := []params.MachineStorageId{{
  1414  		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
  1415  	}}
  1416  
  1417  	attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) {
  1418  		c.Assert(ids, gc.DeepEquals, expectedAttachmentIds)
  1419  		life := params.Alive
  1420  		if attached {
  1421  			life = params.Dying
  1422  		}
  1423  		return []params.LifeResult{{Life: life}}, nil
  1424  	}
  1425  
  1426  	detached := make(chan interface{})
  1427  	s.provider.detachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) ([]error, error) {
  1428  		c.Assert(args, gc.HasLen, 1)
  1429  		c.Assert(args[0].Machine.String(), gc.Equals, expectedAttachmentIds[0].MachineTag)
  1430  		c.Assert(args[0].Filesystem.String(), gc.Equals, expectedAttachmentIds[0].AttachmentTag)
  1431  		defer close(detached)
  1432  		return make([]error, len(args)), nil
  1433  	}
  1434  
  1435  	removed := make(chan interface{})
  1436  	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
  1437  		c.Assert(ids, gc.DeepEquals, expectedAttachmentIds)
  1438  		close(removed)
  1439  		return make([]params.ErrorResult, len(ids)), nil
  1440  	}
  1441  
  1442  	// filesystem-1 and machine-1 are provisioned.
  1443  	filesystemAccessor.provisionedFilesystems["filesystem-1"] = params.Filesystem{
  1444  		FilesystemTag: "filesystem-1",
  1445  		Info: params.FilesystemInfo{
  1446  			FilesystemId: "fs-id",
  1447  		},
  1448  	}
  1449  	filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
  1450  
  1451  	args := &workerArgs{
  1452  		filesystems: filesystemAccessor,
  1453  		life: &mockLifecycleManager{
  1454  			attachmentLife:    attachmentLife,
  1455  			removeAttachments: removeAttachments,
  1456  		},
  1457  		registry: s.registry,
  1458  	}
  1459  	worker := newStorageProvisioner(c, args)
  1460  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1461  	defer worker.Kill()
  1462  
  1463  	filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
  1464  		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
  1465  	}}
  1466  	filesystemAccessor.filesystemsWatcher.changes <- []string{"1"}
  1467  	waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set")
  1468  	filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{
  1469  		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
  1470  	}}
  1471  	waitChannel(c, detached, "waiting for filesystem to be detached")
  1472  	waitChannel(c, removed, "waiting for attachment to be removed")
  1473  }
  1474  
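        // TestDestroyVolumes checks that Dead volumes are removed, and that a
        // provisioned volume is destroyed via the provider before removal.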
  1475  func (s *storageProvisionerSuite) TestDestroyVolumes(c *gc.C) {
  1476  	provisionedVolume := names.NewVolumeTag("1")
  1477  	unprovisionedVolume := names.NewVolumeTag("2")
  1478  
  1479  	volumeAccessor := newMockVolumeAccessor()
  1480  	volumeAccessor.provisionVolume(provisionedVolume)
  1481  
  1482  	life := func(tags []names.Tag) ([]params.LifeResult, error) {
  1483  		results := make([]params.LifeResult, len(tags))
  1484  		for i := range results {
  1485  			results[i].Life = params.Dead
  1486  		}
  1487  		return results, nil
  1488  	}
  1489  
  1490  	destroyedChan := make(chan interface{}, 1)
  1491  	s.provider.destroyVolumesFunc = func(volumeIds []string) ([]error, error) {
  1492  		destroyedChan <- volumeIds
  1493  		return make([]error, len(volumeIds)), nil
  1494  	}
  1495  
  1496  	removedChan := make(chan interface{}, 1)
  1497  	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
  1498  		removedChan <- tags
  1499  		return make([]params.ErrorResult, len(tags)), nil
  1500  	}
  1501  
  1502  	args := &workerArgs{
  1503  		volumes: volumeAccessor,
  1504  		life: &mockLifecycleManager{
  1505  			life:   life,
  1506  			remove: remove,
  1507  		},
  1508  		registry: s.registry,
  1509  	}
  1510  	worker := newStorageProvisioner(c, args)
  1511  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1512  	defer worker.Kill()
  1513  
  1514  	volumeAccessor.volumesWatcher.changes <- []string{
  1515  		provisionedVolume.Id(),
  1516  		unprovisionedVolume.Id(),
  1517  	}
  1518  
  1519  	// Both volumes should be removed; the provisioned one
  1520  	// should be deprovisioned first.
  1521  
  1522  	destroyed := waitChannel(c, destroyedChan, "waiting for volume to be deprovisioned")
  1523  	assertNoEvent(c, destroyedChan, "volumes deprovisioned")
  1524  	c.Assert(destroyed, jc.DeepEquals, []string{"vol-1"})
  1525  
  1526  	var removed []names.Tag
  1527  	for len(removed) < 2 {
  1528  		tags := waitChannel(c, removedChan, "waiting for volumes to be removed").([]names.Tag)
  1529  		removed = append(removed, tags...)
  1530  	}
  1531  	c.Assert(removed, jc.SameContents, []names.Tag{provisionedVolume, unprovisionedVolume})
  1532  	assertNoEvent(c, removedChan, "volumes removed")
  1533  }
  1534  
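        // TestDestroyVolumesRetry checks that failed destroy attempts are
        // retried with the same backoff schedule as detachment, reporting a
        // "destroying" status with the error message after each failure.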
  1535  func (s *storageProvisionerSuite) TestDestroyVolumesRetry(c *gc.C) {
  1536  	volume := names.NewVolumeTag("1")
  1537  	volumeAccessor := newMockVolumeAccessor()
  1538  	volumeAccessor.provisionVolume(volume)
  1539  
  1540  	life := func(tags []names.Tag) ([]params.LifeResult, error) {
  1541  		return []params.LifeResult{{Life: params.Dead}}, nil
  1542  	}
  1543  
  1544  	// mockClock's After advances the current time by the specified
  1545  	// duration and signals the channel immediately.
  1546  	clock := &mockClock{}
  1547  	var destroyVolumeTimes []time.Time
  1548  
  1549  	s.provider.destroyVolumesFunc = func(volumeIds []string) ([]error, error) {
  1550  		destroyVolumeTimes = append(destroyVolumeTimes, clock.Now())
  1551  		if len(destroyVolumeTimes) < 10 {
  1552  			return []error{errors.New("badness")}, nil
  1553  		}
  1554  		return []error{nil}, nil
  1555  	}
  1556  
  1557  	removedChan := make(chan interface{}, 1)
  1558  	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
  1559  		removedChan <- tags
  1560  		return make([]params.ErrorResult, len(tags)), nil
  1561  	}
  1562  
  1563  	args := &workerArgs{
  1564  		volumes: volumeAccessor,
  1565  		clock:   clock,
  1566  		life: &mockLifecycleManager{
  1567  			life:   life,
  1568  			remove: remove,
  1569  		},
  1570  		registry: s.registry,
  1571  	}
  1572  	worker := newStorageProvisioner(c, args)
  1573  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1574  	defer worker.Kill()
  1575  
  1576  	volumeAccessor.volumesWatcher.changes <- []string{volume.Id()}
  1577  	waitChannel(c, removedChan, "waiting for volume to be removed")
  1578  	c.Assert(destroyVolumeTimes, gc.HasLen, 10)
  1579  
  1580  	// The first attempt should have been immediate: T0.
  1581  	c.Assert(destroyVolumeTimes[0], gc.Equals, time.Time{})
  1582  
  1583  	delays := make([]time.Duration, len(destroyVolumeTimes)-1)
  1584  	for i := range destroyVolumeTimes[1:] {
  1585  		delays[i] = destroyVolumeTimes[i+1].Sub(destroyVolumeTimes[i])
  1586  	}
  1587  	c.Assert(delays, jc.DeepEquals, []time.Duration{
  1588  		30 * time.Second,
  1589  		1 * time.Minute,
  1590  		2 * time.Minute,
  1591  		4 * time.Minute,
  1592  		8 * time.Minute,
  1593  		16 * time.Minute,
  1594  		30 * time.Minute, // ceiling reached
  1595  		30 * time.Minute,
  1596  		30 * time.Minute,
  1597  	})
  1598  
  1599  	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
  1600  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1601  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1602  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1603  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1604  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1605  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1606  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1607  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1608  		{Tag: "volume-1", Status: "destroying", Info: "badness"},
  1609  	})
  1610  }
  1611  
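        // TestDestroyFilesystems checks that Dead filesystems, provisioned or
        // not, are removed.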
  1612  func (s *storageProvisionerSuite) TestDestroyFilesystems(c *gc.C) {
  1613  	provisionedFilesystem := names.NewFilesystemTag("1")
  1614  	unprovisionedFilesystem := names.NewFilesystemTag("2")
  1615  
  1616  	filesystemAccessor := newMockFilesystemAccessor()
  1617  	filesystemAccessor.provisionFilesystem(provisionedFilesystem)
  1618  
  1619  	life := func(tags []names.Tag) ([]params.LifeResult, error) {
  1620  		results := make([]params.LifeResult, len(tags))
  1621  		for i := range results {
  1622  			results[i].Life = params.Dead
  1623  		}
  1624  		return results, nil
  1625  	}
  1626  
  1627  	removedChan := make(chan interface{}, 1)
  1628  	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
  1629  		removedChan <- tags
  1630  		return make([]params.ErrorResult, len(tags)), nil
  1631  	}
  1632  
  1633  	args := &workerArgs{
  1634  		filesystems: filesystemAccessor,
  1635  		life: &mockLifecycleManager{
  1636  			life:   life,
  1637  			remove: remove,
  1638  		},
  1639  		registry: s.registry,
  1640  	}
  1641  	worker := newStorageProvisioner(c, args)
  1642  	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
  1643  	defer worker.Kill()
  1644  
  1645  	filesystemAccessor.filesystemsWatcher.changes <- []string{
  1646  		provisionedFilesystem.Id(),
  1647  		unprovisionedFilesystem.Id(),
  1648  	}
  1649  
  1650  	// Both filesystems should be removed; the provisioned one
  1651  	// *should* be deprovisioned first, but we don't currently
  1652  	// have the ability to do so via the storage provider API.
  1653  
  1654  	var removed []names.Tag
  1655  	for len(removed) < 2 {
  1656  		tags := waitChannel(c, removedChan, "waiting for filesystems to be removed").([]names.Tag)
  1657  		removed = append(removed, tags...)
  1658  	}
  1659  	c.Assert(removed, jc.SameContents, []names.Tag{provisionedFilesystem, unprovisionedFilesystem})
  1660  	assertNoEvent(c, removedChan, "filesystems removed")
  1661  }
  1662  
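        // newStorageProvisioner constructs a storage provisioner worker from
        // the given workerArgs, substituting mocks for most dependencies that
        // are not supplied.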
  1663  func newStorageProvisioner(c *gc.C, args *workerArgs) worker.Worker {
  1664  	if args == nil {
  1665  		args = &workerArgs{}
  1666  	}
  1667  	var storageDir string
  1668  	switch args.scope.(type) {
  1669  	case names.MachineTag:
  1670  		storageDir = "storage-dir"
  1671  	case names.ModelTag:
  1672  	case nil:
  1673  		args.scope = coretesting.ModelTag
  1674  	}
  1675  	if args.volumes == nil {
  1676  		args.volumes = newMockVolumeAccessor()
  1677  	}
  1678  	if args.filesystems == nil {
  1679  		args.filesystems = newMockFilesystemAccessor()
  1680  	}
  1681  	if args.life == nil {
  1682  		args.life = &mockLifecycleManager{}
  1683  	}
  1684  	if args.machines == nil {
  1685  		args.machines = newMockMachineAccessor(c)
  1686  	}
  1687  	if args.clock == nil {
  1688  		args.clock = &mockClock{}
  1689  	}
  1690  	if args.statusSetter == nil {
  1691  		args.statusSetter = &mockStatusSetter{}
  1692  	}
  1693  	worker, err := storageprovisioner.NewStorageProvisioner(storageprovisioner.Config{
  1694  		Scope:       args.scope,
  1695  		StorageDir:  storageDir,
  1696  		Volumes:     args.volumes,
  1697  		Filesystems: args.filesystems,
  1698  		Life:        args.life,
  1699  		Registry:    args.registry,
  1700  		Machines:    args.machines,
  1701  		Status:      args.statusSetter,
  1702  		Clock:       args.clock,
  1703  	})
  1704  	c.Assert(err, jc.ErrorIsNil)
  1705  	return worker
  1706  }
  1707  
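        // workerArgs holds the dependencies injected into the worker under
        // test; most unset fields are filled in with mocks by
        // newStorageProvisioner.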
  1708  type workerArgs struct {
  1709  	scope        names.Tag
  1710  	volumes      *mockVolumeAccessor
  1711  	filesystems  *mockFilesystemAccessor
  1712  	life         *mockLifecycleManager
  1713  	registry     storage.ProviderRegistry
  1714  	machines     *mockMachineAccessor
  1715  	clock        clock.Clock
  1716  	statusSetter *mockStatusSetter
  1717  }
  1718  
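        // waitChannel receives a value from ch, failing the test if nothing
        // arrives within coretesting.LongWait.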
  1719  func waitChannel(c *gc.C, ch <-chan interface{}, activity string) interface{} {
  1720  	select {
  1721  	case v := <-ch:
  1722  		return v
  1723  	case <-time.After(coretesting.LongWait):
  1724  		c.Fatalf("timed out %s", activity)
  1725  		panic("unreachable")
  1726  	}
  1727  }
  1728  
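        // assertNoEvent asserts that nothing is received on ch within
        // coretesting.ShortWait.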
  1729  func assertNoEvent(c *gc.C, ch <-chan interface{}, event string) {
  1730  	select {
  1731  	case <-ch:
  1732  		c.Fatalf("unexpected %s", event)
  1733  	case <-time.After(coretesting.ShortWait):
  1734  	}
  1735  }