github.com/cloud-green/juju@v0.0.0-20151002100041-a00291338d3d/worker/storageprovisioner/storageprovisioner_test.go (about) 1 // Copyright 2015 Canonical Ltd. 2 // Licensed under the AGPLv3, see LICENCE file for details. 3 4 package storageprovisioner_test 5 6 import ( 7 "errors" 8 "time" 9 10 "github.com/juju/names" 11 jc "github.com/juju/testing/checkers" 12 "github.com/juju/utils/clock" 13 gc "gopkg.in/check.v1" 14 15 "github.com/juju/juju/apiserver/params" 16 "github.com/juju/juju/environs/config" 17 "github.com/juju/juju/instance" 18 "github.com/juju/juju/storage" 19 "github.com/juju/juju/storage/provider/registry" 20 coretesting "github.com/juju/juju/testing" 21 "github.com/juju/juju/worker" 22 "github.com/juju/juju/worker/storageprovisioner" 23 ) 24 25 type storageProvisionerSuite struct { 26 coretesting.BaseSuite 27 provider *dummyProvider 28 managedFilesystemSource *mockManagedFilesystemSource 29 } 30 31 var _ = gc.Suite(&storageProvisionerSuite{}) 32 33 func (s *storageProvisionerSuite) SetUpTest(c *gc.C) { 34 s.BaseSuite.SetUpTest(c) 35 s.provider = &dummyProvider{dynamic: true} 36 registry.RegisterProvider("dummy", s.provider) 37 s.AddCleanup(func(*gc.C) { 38 registry.RegisterProvider("dummy", nil) 39 }) 40 41 s.managedFilesystemSource = nil 42 s.PatchValue( 43 storageprovisioner.NewManagedFilesystemSource, 44 func( 45 blockDevices map[names.VolumeTag]storage.BlockDevice, 46 filesystems map[names.FilesystemTag]storage.Filesystem, 47 ) storage.FilesystemSource { 48 s.managedFilesystemSource = &mockManagedFilesystemSource{ 49 blockDevices: blockDevices, 50 filesystems: filesystems, 51 } 52 return s.managedFilesystemSource 53 }, 54 ) 55 } 56 57 func (s *storageProvisionerSuite) TestStartStop(c *gc.C) { 58 worker := storageprovisioner.NewStorageProvisioner( 59 coretesting.EnvironmentTag, 60 "dir", 61 newMockVolumeAccessor(), 62 newMockFilesystemAccessor(), 63 &mockLifecycleManager{}, 64 newMockEnvironAccessor(c), 65 newMockMachineAccessor(c), 66 
&mockStatusSetter{}, 67 &mockClock{}, 68 ) 69 worker.Kill() 70 c.Assert(worker.Wait(), gc.IsNil) 71 } 72 73 func (s *storageProvisionerSuite) TestVolumeAdded(c *gc.C) { 74 expectedVolumes := []params.Volume{{ 75 VolumeTag: "volume-1", 76 Info: params.VolumeInfo{ 77 VolumeId: "id-1", 78 HardwareId: "serial-1", 79 Size: 1024, 80 Persistent: true, 81 }, 82 }, { 83 VolumeTag: "volume-2", 84 Info: params.VolumeInfo{ 85 VolumeId: "id-2", 86 HardwareId: "serial-2", 87 Size: 1024, 88 }, 89 }} 90 expectedVolumeAttachments := []params.VolumeAttachment{{ 91 VolumeTag: "volume-1", 92 MachineTag: "machine-1", 93 Info: params.VolumeAttachmentInfo{ 94 DeviceName: "/dev/sda1", 95 ReadOnly: true, 96 }, 97 }, { 98 VolumeTag: "volume-2", 99 MachineTag: "machine-1", 100 Info: params.VolumeAttachmentInfo{ 101 DeviceName: "/dev/sda2", 102 }, 103 }} 104 105 volumeInfoSet := make(chan interface{}) 106 volumeAccessor := newMockVolumeAccessor() 107 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 108 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 109 defer close(volumeInfoSet) 110 c.Assert(volumes, jc.SameContents, expectedVolumes) 111 return nil, nil 112 } 113 114 volumeAttachmentInfoSet := make(chan interface{}) 115 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 116 defer close(volumeAttachmentInfoSet) 117 c.Assert(volumeAttachments, jc.SameContents, expectedVolumeAttachments) 118 return nil, nil 119 } 120 121 args := &workerArgs{volumes: volumeAccessor} 122 worker := newStorageProvisioner(c, args) 123 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 124 defer worker.Kill() 125 126 volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ 127 MachineTag: "machine-1", AttachmentTag: "volume-1", 128 }, { 129 MachineTag: "machine-1", AttachmentTag: "volume-2", 130 }} 131 assertNoEvent(c, volumeAttachmentInfoSet, 
"volume attachment set") 132 133 // The worker should create volumes according to ids "1" and "2". 134 volumeAccessor.volumesWatcher.changes <- []string{"1", "2"} 135 // ... but not until the environment config is available. 136 assertNoEvent(c, volumeInfoSet, "volume info set") 137 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set") 138 args.environ.watcher.changes <- struct{}{} 139 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 140 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 141 } 142 143 func (s *storageProvisionerSuite) TestCreateVolumeCreatesAttachment(c *gc.C) { 144 volumeAccessor := newMockVolumeAccessor() 145 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 146 147 volumeAttachmentInfoSet := make(chan interface{}) 148 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 149 defer close(volumeAttachmentInfoSet) 150 return make([]params.ErrorResult, len(volumeAttachments)), nil 151 } 152 153 s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) { 154 volumeAccessor.provisionedAttachments[params.MachineStorageId{ 155 MachineTag: args[0].Attachment.Machine.String(), 156 AttachmentTag: args[0].Attachment.Volume.String(), 157 }] = params.VolumeAttachment{ 158 VolumeTag: args[0].Attachment.Volume.String(), 159 MachineTag: args[0].Attachment.Machine.String(), 160 } 161 return []storage.CreateVolumesResult{{ 162 Volume: &storage.Volume{ 163 Tag: args[0].Tag, 164 VolumeInfo: storage.VolumeInfo{ 165 VolumeId: "vol-ume", 166 }, 167 }, 168 VolumeAttachment: &storage.VolumeAttachment{ 169 Volume: args[0].Attachment.Volume, 170 Machine: args[0].Attachment.Machine, 171 }, 172 }}, nil 173 } 174 175 attachVolumesCalled := make(chan interface{}) 176 s.provider.attachVolumesFunc = func(args []storage.VolumeAttachmentParams) 
([]storage.AttachVolumesResult, error) { 177 defer close(attachVolumesCalled) 178 return nil, errors.New("should not be called") 179 } 180 181 args := &workerArgs{volumes: volumeAccessor} 182 worker := newStorageProvisioner(c, args) 183 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 184 defer worker.Kill() 185 186 volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ 187 MachineTag: "machine-1", AttachmentTag: "volume-1", 188 }} 189 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment set") 190 191 // The worker should create volumes according to ids "1". 192 volumeAccessor.volumesWatcher.changes <- []string{"1"} 193 args.environ.watcher.changes <- struct{}{} 194 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 195 assertNoEvent(c, attachVolumesCalled, "AttachVolumes called") 196 } 197 198 func (s *storageProvisionerSuite) TestCreateVolumeRetry(c *gc.C) { 199 volumeInfoSet := make(chan interface{}) 200 volumeAccessor := newMockVolumeAccessor() 201 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 202 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 203 defer close(volumeInfoSet) 204 return make([]params.ErrorResult, len(volumes)), nil 205 } 206 207 // mockFunc's After will progress the current time by the specified 208 // duration and signal the channel immediately. 
209 clock := &mockClock{} 210 var createVolumeTimes []time.Time 211 212 s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) { 213 createVolumeTimes = append(createVolumeTimes, clock.Now()) 214 if len(createVolumeTimes) < 10 { 215 return []storage.CreateVolumesResult{{Error: errors.New("badness")}}, nil 216 } 217 return []storage.CreateVolumesResult{{ 218 Volume: &storage.Volume{Tag: args[0].Tag}, 219 }}, nil 220 } 221 222 args := &workerArgs{volumes: volumeAccessor, clock: clock} 223 worker := newStorageProvisioner(c, args) 224 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 225 defer worker.Kill() 226 227 volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ 228 MachineTag: "machine-1", AttachmentTag: "volume-1", 229 }} 230 volumeAccessor.volumesWatcher.changes <- []string{"1"} 231 args.environ.watcher.changes <- struct{}{} 232 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 233 c.Assert(createVolumeTimes, gc.HasLen, 10) 234 235 // The first attempt should have been immediate: T0. 
236 c.Assert(createVolumeTimes[0], gc.Equals, time.Time{}) 237 238 delays := make([]time.Duration, len(createVolumeTimes)-1) 239 for i := range createVolumeTimes[1:] { 240 delays[i] = createVolumeTimes[i+1].Sub(createVolumeTimes[i]) 241 } 242 c.Assert(delays, jc.DeepEquals, []time.Duration{ 243 30 * time.Second, 244 1 * time.Minute, 245 2 * time.Minute, 246 4 * time.Minute, 247 8 * time.Minute, 248 16 * time.Minute, 249 30 * time.Minute, // ceiling reached 250 30 * time.Minute, 251 30 * time.Minute, 252 }) 253 254 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 255 {Tag: "volume-1", Status: "pending", Info: "badness"}, 256 {Tag: "volume-1", Status: "pending", Info: "badness"}, 257 {Tag: "volume-1", Status: "pending", Info: "badness"}, 258 {Tag: "volume-1", Status: "pending", Info: "badness"}, 259 {Tag: "volume-1", Status: "pending", Info: "badness"}, 260 {Tag: "volume-1", Status: "pending", Info: "badness"}, 261 {Tag: "volume-1", Status: "pending", Info: "badness"}, 262 {Tag: "volume-1", Status: "pending", Info: "badness"}, 263 {Tag: "volume-1", Status: "pending", Info: "badness"}, 264 {Tag: "volume-1", Status: "attaching", Info: ""}, 265 }) 266 } 267 268 func (s *storageProvisionerSuite) TestCreateFilesystemRetry(c *gc.C) { 269 filesystemInfoSet := make(chan interface{}) 270 filesystemAccessor := newMockFilesystemAccessor() 271 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 272 filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { 273 defer close(filesystemInfoSet) 274 return make([]params.ErrorResult, len(filesystems)), nil 275 } 276 277 // mockFunc's After will progress the current time by the specified 278 // duration and signal the channel immediately. 
279 clock := &mockClock{} 280 var createFilesystemTimes []time.Time 281 282 s.provider.createFilesystemsFunc = func(args []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) { 283 createFilesystemTimes = append(createFilesystemTimes, clock.Now()) 284 if len(createFilesystemTimes) < 10 { 285 return []storage.CreateFilesystemsResult{{Error: errors.New("badness")}}, nil 286 } 287 return []storage.CreateFilesystemsResult{{ 288 Filesystem: &storage.Filesystem{Tag: args[0].Tag}, 289 }}, nil 290 } 291 292 args := &workerArgs{filesystems: filesystemAccessor, clock: clock} 293 worker := newStorageProvisioner(c, args) 294 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 295 defer worker.Kill() 296 297 filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ 298 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 299 }} 300 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 301 args.environ.watcher.changes <- struct{}{} 302 waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") 303 c.Assert(createFilesystemTimes, gc.HasLen, 10) 304 305 // The first attempt should have been immediate: T0. 
306 c.Assert(createFilesystemTimes[0], gc.Equals, time.Time{}) 307 308 delays := make([]time.Duration, len(createFilesystemTimes)-1) 309 for i := range createFilesystemTimes[1:] { 310 delays[i] = createFilesystemTimes[i+1].Sub(createFilesystemTimes[i]) 311 } 312 c.Assert(delays, jc.DeepEquals, []time.Duration{ 313 30 * time.Second, 314 1 * time.Minute, 315 2 * time.Minute, 316 4 * time.Minute, 317 8 * time.Minute, 318 16 * time.Minute, 319 30 * time.Minute, // ceiling reached 320 30 * time.Minute, 321 30 * time.Minute, 322 }) 323 324 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 325 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 326 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 327 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 328 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 329 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 330 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 331 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 332 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 333 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 334 {Tag: "filesystem-1", Status: "attaching", Info: ""}, 335 }) 336 } 337 338 func (s *storageProvisionerSuite) TestAttachVolumeRetry(c *gc.C) { 339 volumeInfoSet := make(chan interface{}) 340 volumeAccessor := newMockVolumeAccessor() 341 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 342 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 343 defer close(volumeInfoSet) 344 return make([]params.ErrorResult, len(volumes)), nil 345 } 346 volumeAttachmentInfoSet := make(chan interface{}) 347 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 348 defer close(volumeAttachmentInfoSet) 349 return make([]params.ErrorResult, len(volumeAttachments)), nil 350 } 351 
352 // mockFunc's After will progress the current time by the specified 353 // duration and signal the channel immediately. 354 clock := &mockClock{} 355 var attachVolumeTimes []time.Time 356 357 s.provider.attachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) { 358 attachVolumeTimes = append(attachVolumeTimes, clock.Now()) 359 if len(attachVolumeTimes) < 10 { 360 return []storage.AttachVolumesResult{{Error: errors.New("badness")}}, nil 361 } 362 return []storage.AttachVolumesResult{{ 363 VolumeAttachment: &storage.VolumeAttachment{ 364 args[0].Volume, 365 args[0].Machine, 366 storage.VolumeAttachmentInfo{ 367 DeviceName: "/dev/sda1", 368 }, 369 }, 370 }}, nil 371 } 372 373 args := &workerArgs{volumes: volumeAccessor, clock: clock} 374 worker := newStorageProvisioner(c, args) 375 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 376 defer worker.Kill() 377 378 volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ 379 MachineTag: "machine-1", AttachmentTag: "volume-1", 380 }} 381 volumeAccessor.volumesWatcher.changes <- []string{"1"} 382 args.environ.watcher.changes <- struct{}{} 383 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 384 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 385 c.Assert(attachVolumeTimes, gc.HasLen, 10) 386 387 // The first attempt should have been immediate: T0. 
388 c.Assert(attachVolumeTimes[0], gc.Equals, time.Time{}) 389 390 delays := make([]time.Duration, len(attachVolumeTimes)-1) 391 for i := range attachVolumeTimes[1:] { 392 delays[i] = attachVolumeTimes[i+1].Sub(attachVolumeTimes[i]) 393 } 394 c.Assert(delays, jc.DeepEquals, []time.Duration{ 395 30 * time.Second, 396 1 * time.Minute, 397 2 * time.Minute, 398 4 * time.Minute, 399 8 * time.Minute, 400 16 * time.Minute, 401 30 * time.Minute, // ceiling reached 402 30 * time.Minute, 403 30 * time.Minute, 404 }) 405 406 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 407 {Tag: "volume-1", Status: "attaching", Info: ""}, // CreateVolumes 408 {Tag: "volume-1", Status: "attaching", Info: "badness"}, // AttachVolumes 409 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 410 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 411 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 412 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 413 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 414 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 415 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 416 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 417 {Tag: "volume-1", Status: "attached", Info: ""}, 418 }) 419 } 420 421 func (s *storageProvisionerSuite) TestAttachFilesystemRetry(c *gc.C) { 422 filesystemInfoSet := make(chan interface{}) 423 filesystemAccessor := newMockFilesystemAccessor() 424 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 425 filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { 426 defer close(filesystemInfoSet) 427 return make([]params.ErrorResult, len(filesystems)), nil 428 } 429 filesystemAttachmentInfoSet := make(chan interface{}) 430 filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) { 431 defer 
close(filesystemAttachmentInfoSet) 432 return make([]params.ErrorResult, len(filesystemAttachments)), nil 433 } 434 435 // mockFunc's After will progress the current time by the specified 436 // duration and signal the channel immediately. 437 clock := &mockClock{} 438 var attachFilesystemTimes []time.Time 439 440 s.provider.attachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) ([]storage.AttachFilesystemsResult, error) { 441 attachFilesystemTimes = append(attachFilesystemTimes, clock.Now()) 442 if len(attachFilesystemTimes) < 10 { 443 return []storage.AttachFilesystemsResult{{Error: errors.New("badness")}}, nil 444 } 445 return []storage.AttachFilesystemsResult{{ 446 FilesystemAttachment: &storage.FilesystemAttachment{ 447 args[0].Filesystem, 448 args[0].Machine, 449 storage.FilesystemAttachmentInfo{ 450 Path: "/oh/over/there", 451 }, 452 }, 453 }}, nil 454 } 455 456 args := &workerArgs{filesystems: filesystemAccessor, clock: clock} 457 worker := newStorageProvisioner(c, args) 458 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 459 defer worker.Kill() 460 461 filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ 462 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 463 }} 464 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 465 args.environ.watcher.changes <- struct{}{} 466 waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") 467 waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set") 468 c.Assert(attachFilesystemTimes, gc.HasLen, 10) 469 470 // The first attempt should have been immediate: T0. 
471 c.Assert(attachFilesystemTimes[0], gc.Equals, time.Time{}) 472 473 delays := make([]time.Duration, len(attachFilesystemTimes)-1) 474 for i := range attachFilesystemTimes[1:] { 475 delays[i] = attachFilesystemTimes[i+1].Sub(attachFilesystemTimes[i]) 476 } 477 c.Assert(delays, jc.DeepEquals, []time.Duration{ 478 30 * time.Second, 479 1 * time.Minute, 480 2 * time.Minute, 481 4 * time.Minute, 482 8 * time.Minute, 483 16 * time.Minute, 484 30 * time.Minute, // ceiling reached 485 30 * time.Minute, 486 30 * time.Minute, 487 }) 488 489 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 490 {Tag: "filesystem-1", Status: "attaching", Info: ""}, // CreateFilesystems 491 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, // AttachFilesystems 492 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 493 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 494 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 495 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 496 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 497 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 498 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 499 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 500 {Tag: "filesystem-1", Status: "attached", Info: ""}, 501 }) 502 } 503 504 func (s *storageProvisionerSuite) TestFilesystemAdded(c *gc.C) { 505 expectedFilesystems := []params.Filesystem{{ 506 FilesystemTag: "filesystem-1", 507 Info: params.FilesystemInfo{ 508 FilesystemId: "id-1", 509 Size: 1024, 510 }, 511 }, { 512 FilesystemTag: "filesystem-2", 513 Info: params.FilesystemInfo{ 514 FilesystemId: "id-2", 515 Size: 1024, 516 }, 517 }} 518 519 filesystemInfoSet := make(chan interface{}) 520 filesystemAccessor := newMockFilesystemAccessor() 521 filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { 522 defer close(filesystemInfoSet) 523 
c.Assert(filesystems, jc.SameContents, expectedFilesystems) 524 return nil, nil 525 } 526 527 args := &workerArgs{filesystems: filesystemAccessor} 528 worker := newStorageProvisioner(c, args) 529 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 530 defer worker.Kill() 531 532 // The worker should create filesystems according to ids "1" and "2". 533 filesystemAccessor.filesystemsWatcher.changes <- []string{"1", "2"} 534 // ... but not until the environment config is available. 535 assertNoEvent(c, filesystemInfoSet, "filesystem info set") 536 args.environ.watcher.changes <- struct{}{} 537 waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") 538 } 539 540 func (s *storageProvisionerSuite) TestVolumeNeedsInstance(c *gc.C) { 541 volumeInfoSet := make(chan interface{}) 542 volumeAccessor := newMockVolumeAccessor() 543 volumeAccessor.setVolumeInfo = func([]params.Volume) ([]params.ErrorResult, error) { 544 defer close(volumeInfoSet) 545 return nil, nil 546 } 547 volumeAccessor.setVolumeAttachmentInfo = func([]params.VolumeAttachment) ([]params.ErrorResult, error) { 548 return nil, nil 549 } 550 551 args := &workerArgs{volumes: volumeAccessor} 552 worker := newStorageProvisioner(c, args) 553 defer worker.Wait() 554 defer worker.Kill() 555 556 volumeAccessor.volumesWatcher.changes <- []string{needsInstanceVolumeId} 557 args.environ.watcher.changes <- struct{}{} 558 assertNoEvent(c, volumeInfoSet, "volume info set") 559 args.machines.instanceIds[names.NewMachineTag("1")] = "inst-id" 560 args.machines.watcher.changes <- struct{}{} 561 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 562 } 563 564 func (s *storageProvisionerSuite) TestVolumeNonDynamic(c *gc.C) { 565 volumeInfoSet := make(chan interface{}) 566 volumeAccessor := newMockVolumeAccessor() 567 volumeAccessor.setVolumeInfo = func([]params.Volume) ([]params.ErrorResult, error) { 568 defer close(volumeInfoSet) 569 return nil, nil 570 } 571 572 args := &workerArgs{volumes: 
volumeAccessor} 573 worker := newStorageProvisioner(c, args) 574 defer worker.Wait() 575 defer worker.Kill() 576 577 // Volumes for non-dynamic providers should not be created. 578 s.provider.dynamic = false 579 args.environ.watcher.changes <- struct{}{} 580 volumeAccessor.volumesWatcher.changes <- []string{"1"} 581 assertNoEvent(c, volumeInfoSet, "volume info set") 582 } 583 584 func (s *storageProvisionerSuite) TestVolumeAttachmentAdded(c *gc.C) { 585 // We should get two volume attachments: 586 // - volume-1 to machine-1, because the volume and 587 // machine are provisioned, but the attachment is not. 588 // - volume-1 to machine-0, because the volume, 589 // machine, and attachment are provisioned, but 590 // in a previous session, so a reattachment is 591 // requested. 592 expectedVolumeAttachments := []params.VolumeAttachment{{ 593 VolumeTag: "volume-1", 594 MachineTag: "machine-1", 595 Info: params.VolumeAttachmentInfo{ 596 DeviceName: "/dev/sda1", 597 ReadOnly: true, 598 }, 599 }, { 600 VolumeTag: "volume-1", 601 MachineTag: "machine-0", 602 Info: params.VolumeAttachmentInfo{ 603 DeviceName: "/dev/sda1", 604 ReadOnly: true, 605 }, 606 }} 607 608 var allVolumeAttachments []params.VolumeAttachment 609 volumeAttachmentInfoSet := make(chan interface{}) 610 volumeAccessor := newMockVolumeAccessor() 611 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 612 allVolumeAttachments = append(allVolumeAttachments, volumeAttachments...) 613 volumeAttachmentInfoSet <- nil 614 return make([]params.ErrorResult, len(volumeAttachments)), nil 615 } 616 617 // volume-1, machine-0, and machine-1 are provisioned. 
618 volumeAccessor.provisionedVolumes["volume-1"] = params.Volume{ 619 VolumeTag: "volume-1", 620 Info: params.VolumeInfo{ 621 VolumeId: "vol-123", 622 }, 623 } 624 volumeAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0") 625 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 626 627 // machine-0/volume-1 attachment is already created. 628 // We should see a reattachment. 629 alreadyAttached := params.MachineStorageId{ 630 MachineTag: "machine-0", 631 AttachmentTag: "volume-1", 632 } 633 volumeAccessor.provisionedAttachments[alreadyAttached] = params.VolumeAttachment{ 634 MachineTag: "machine-0", 635 VolumeTag: "volume-1", 636 } 637 638 args := &workerArgs{volumes: volumeAccessor} 639 worker := newStorageProvisioner(c, args) 640 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 641 defer worker.Kill() 642 643 volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ 644 MachineTag: "machine-1", AttachmentTag: "volume-1", 645 }, { 646 MachineTag: "machine-1", AttachmentTag: "volume-2", 647 }, { 648 MachineTag: "machine-2", AttachmentTag: "volume-1", 649 }, { 650 MachineTag: "machine-0", AttachmentTag: "volume-1", 651 }} 652 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set") 653 volumeAccessor.volumesWatcher.changes <- []string{"1"} 654 args.environ.watcher.changes <- struct{}{} 655 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 656 c.Assert(allVolumeAttachments, jc.SameContents, expectedVolumeAttachments) 657 658 // Reattachment should only happen once per session. 
659 volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{alreadyAttached} 660 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set") 661 } 662 663 func (s *storageProvisionerSuite) TestFilesystemAttachmentAdded(c *gc.C) { 664 // We should only get a single filesystem attachment, because it is the 665 // only combination where both machine and filesystem are already 666 // provisioned, and the attachmenti s not. 667 // We should get two filesystem attachments: 668 // - filesystem-1 to machine-1, because the filesystem and 669 // machine are provisioned, but the attachment is not. 670 // - filesystem-1 to machine-0, because the filesystem, 671 // machine, and attachment are provisioned, but in a 672 // previous session, so a reattachment is requested. 673 expectedFilesystemAttachments := []params.FilesystemAttachment{{ 674 FilesystemTag: "filesystem-1", 675 MachineTag: "machine-1", 676 Info: params.FilesystemAttachmentInfo{ 677 MountPoint: "/srv/fs-123", 678 }, 679 }, { 680 FilesystemTag: "filesystem-1", 681 MachineTag: "machine-0", 682 Info: params.FilesystemAttachmentInfo{ 683 MountPoint: "/srv/fs-123", 684 }, 685 }} 686 687 var allFilesystemAttachments []params.FilesystemAttachment 688 filesystemAttachmentInfoSet := make(chan interface{}) 689 filesystemAccessor := newMockFilesystemAccessor() 690 filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) { 691 allFilesystemAttachments = append(allFilesystemAttachments, filesystemAttachments...) 692 filesystemAttachmentInfoSet <- nil 693 return make([]params.ErrorResult, len(filesystemAttachments)), nil 694 } 695 696 // filesystem-1 and machine-1 are provisioned. 
697 filesystemAccessor.provisionedFilesystems["filesystem-1"] = params.Filesystem{ 698 FilesystemTag: "filesystem-1", 699 Info: params.FilesystemInfo{ 700 FilesystemId: "fs-123", 701 }, 702 } 703 filesystemAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0") 704 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 705 706 // machine-0/filesystem-1 attachment is already created. 707 // We should see a reattachment. 708 alreadyAttached := params.MachineStorageId{ 709 MachineTag: "machine-0", 710 AttachmentTag: "filesystem-1", 711 } 712 filesystemAccessor.provisionedAttachments[alreadyAttached] = params.FilesystemAttachment{ 713 MachineTag: "machine-0", 714 FilesystemTag: "filesystem-1", 715 } 716 717 args := &workerArgs{filesystems: filesystemAccessor} 718 worker := newStorageProvisioner(c, args) 719 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 720 defer worker.Kill() 721 722 filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ 723 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 724 }, { 725 MachineTag: "machine-1", AttachmentTag: "filesystem-2", 726 }, { 727 MachineTag: "machine-2", AttachmentTag: "filesystem-1", 728 }, { 729 MachineTag: "machine-0", AttachmentTag: "filesystem-1", 730 }} 731 // ... but not until the environment config is available. 732 assertNoEvent(c, filesystemAttachmentInfoSet, "filesystem attachment info set") 733 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 734 args.environ.watcher.changes <- struct{}{} 735 waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set") 736 c.Assert(allFilesystemAttachments, jc.SameContents, expectedFilesystemAttachments) 737 738 // Reattachment should only happen once per session. 
	filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{alreadyAttached}
	assertNoEvent(c, filesystemAttachmentInfoSet, "filesystem attachment info set")
}

// TestCreateVolumeBackedFilesystem checks that a machine-scoped provisioner
// creates a volume-backed filesystem only once the backing block device is
// visible on the machine: filesystem 0/0 (device attached up-front) is
// created on the first environ notification, while 0/1 is created only
// after its block device appears and the block-devices watcher fires.
func (s *storageProvisionerSuite) TestCreateVolumeBackedFilesystem(c *gc.C) {
	filesystemInfoSet := make(chan interface{})
	filesystemAccessor := newMockFilesystemAccessor()
	filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
		filesystemInfoSet <- filesystems
		return nil, nil
	}

	args := &workerArgs{
		scope:       names.NewMachineTag("0"),
		filesystems: filesystemAccessor,
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	// Pre-attach the block device backing volume 0/0 only.
	args.volumes.blockDevices[params.MachineStorageId{
		MachineTag:    "machine-0",
		AttachmentTag: "volume-0-0",
	}] = storage.BlockDevice{
		DeviceName: "xvdf1",
		Size:       123,
	}
	filesystemAccessor.filesystemsWatcher.changes <- []string{"0/0", "0/1"}
	// Nothing happens until the initial environ config notification arrives.
	assertNoEvent(c, filesystemInfoSet, "filesystem info set")
	args.environ.watcher.changes <- struct{}{}

	// Only the block device for volume 0/0 is attached at the moment,
	// so only the corresponding filesystem will be created.
	filesystemInfo := waitChannel(
		c, filesystemInfoSet,
		"waiting for filesystem info to be set",
	).([]params.Filesystem)
	c.Assert(filesystemInfo, jc.DeepEquals, []params.Filesystem{{
		FilesystemTag: "filesystem-0-0",
		Info: params.FilesystemInfo{
			FilesystemId: "xvdf1",
			Size:         123,
		},
	}})

	// If we now attach the block device for volume 0/1 and trigger the
	// notification, then the storage provisioner will wake up and create
	// the filesystem.
	args.volumes.blockDevices[params.MachineStorageId{
		MachineTag:    "machine-0",
		AttachmentTag: "volume-0-1",
	}] = storage.BlockDevice{
		DeviceName: "xvdf2",
		Size:       246,
	}
	args.volumes.blockDevicesWatcher.changes <- struct{}{}
	filesystemInfo = waitChannel(
		c, filesystemInfoSet,
		"waiting for filesystem info to be set",
	).([]params.Filesystem)
	c.Assert(filesystemInfo, jc.DeepEquals, []params.Filesystem{{
		FilesystemTag: "filesystem-0-1",
		Info: params.FilesystemInfo{
			FilesystemId: "xvdf2",
			Size:         246,
		},
	}})
}

// TestAttachVolumeBackedFilesystem checks that attaching a volume-backed
// filesystem records attachment info derived from the backing block device
// (mount point under /mnt named after the device, read-only).
func (s *storageProvisionerSuite) TestAttachVolumeBackedFilesystem(c *gc.C) {
	infoSet := make(chan interface{})
	filesystemAccessor := newMockFilesystemAccessor()
	filesystemAccessor.setFilesystemAttachmentInfo = func(attachments []params.FilesystemAttachment) ([]params.ErrorResult, error) {
		infoSet <- attachments
		return nil, nil
	}

	args := &workerArgs{
		scope:       names.NewMachineTag("0"),
		filesystems: filesystemAccessor,
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	// The filesystem and the machine are already provisioned, so only the
	// attachment remains to be made.
	filesystemAccessor.provisionedFilesystems["filesystem-0-0"] = params.Filesystem{
		FilesystemTag: "filesystem-0-0",
		VolumeTag:     "volume-0-0",
		Info: params.FilesystemInfo{
			FilesystemId: "whatever",
			Size:         123,
		},
	}
	filesystemAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0")

	args.volumes.blockDevices[params.MachineStorageId{
		MachineTag:    "machine-0",
		AttachmentTag: "volume-0-0",
	}] = storage.BlockDevice{
		DeviceName: "xvdf1",
		Size:       123,
	}
	filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag:    "machine-0",
		AttachmentTag: "filesystem-0-0",
	}}
	// No attachment is made before the environ config is seen.
	assertNoEvent(c, infoSet, "filesystem attachment info set")
	args.environ.watcher.changes <- struct{}{}
	filesystemAccessor.filesystemsWatcher.changes <- []string{"0/0"}

	info := waitChannel(
		c, infoSet, "waiting for filesystem attachment info to be set",
	).([]params.FilesystemAttachment)
	c.Assert(info, jc.DeepEquals, []params.FilesystemAttachment{{
		FilesystemTag: "filesystem-0-0",
		MachineTag:    "machine-0",
		Info: params.FilesystemAttachmentInfo{
			MountPoint: "/mnt/xvdf1",
			ReadOnly:   true,
		},
	}})
}

// TestUpdateEnvironConfig checks that the provisioner picks up environ
// config changes: the volume source factory observes the updated "foo"
// attribute, and the error it returns surfaces through worker.Wait.
func (s *storageProvisionerSuite) TestUpdateEnvironConfig(c *gc.C) {
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	s.provider.volumeSourceFunc = func(envConfig *config.Config, sourceConfig *storage.Config) (storage.VolumeSource, error) {
		c.Assert(envConfig, gc.NotNil)
		c.Assert(sourceConfig, gc.NotNil)
		// The updated config must have been delivered before the volume
		// source is requested.
		c.Assert(envConfig.AllAttrs()["foo"], gc.Equals, "bar")
		return nil, errors.New("zinga")
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	newConfig, err := args.environ.cfg.Apply(map[string]interface{}{"foo": "bar"})
	c.Assert(err, jc.ErrorIsNil)

	args.environ.watcher.changes <- struct{}{}
	args.environ.setConfig(newConfig)
	args.environ.watcher.changes <- struct{}{}
	args.volumes.volumesWatcher.changes <- []string{"1", "2"}

	err = worker.Wait()
	c.Assert(err, gc.ErrorMatches, `creating volumes: getting volume source: getting storage source "dummy": zinga`)
}

// TestResourceTags checks that resource tags (here {"very": "fancy"},
// presumably supplied by the mock accessors' provisioning params — confirm
// against the mocks' definitions) are passed through to both the volume
// and filesystem creation parameters.
func (s *storageProvisionerSuite) TestResourceTags(c *gc.C) {
	volumeInfoSet := make(chan interface{})
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
		defer close(volumeInfoSet)
		return nil, nil
	}

	filesystemInfoSet := make(chan interface{})
	filesystemAccessor := newMockFilesystemAccessor()
	filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
		defer close(filesystemInfoSet)
		return nil, nil
	}

	// Capture the creation arguments by substituting recording sources.
	var volumeSource dummyVolumeSource
	s.provider.volumeSourceFunc = func(envConfig *config.Config, sourceConfig *storage.Config) (storage.VolumeSource, error) {
		return &volumeSource, nil
	}

	var filesystemSource dummyFilesystemSource
	s.provider.filesystemSourceFunc = func(envConfig *config.Config, sourceConfig *storage.Config) (storage.FilesystemSource, error) {
		return &filesystemSource, nil
	}

	args := &workerArgs{
		volumes:     volumeAccessor,
		filesystems: filesystemAccessor,
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	volumeAccessor.volumesWatcher.changes <- []string{"1"}
	filesystemAccessor.filesystemsWatcher.changes <- []string{"1"}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, volumeInfoSet, "waiting for volume info to be set")
	waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set")
	c.Assert(volumeSource.createVolumesArgs, jc.DeepEquals, [][]storage.VolumeParams{{{
		Tag:          names.NewVolumeTag("1"),
		Size:         1024,
		Provider:     "dummy",
		Attributes:   map[string]interface{}{"persistent": true},
		ResourceTags: map[string]string{"very": "fancy"},
		Attachment: &storage.VolumeAttachmentParams{
			Volume: names.NewVolumeTag("1"),
			AttachmentParams: storage.AttachmentParams{
				Machine:    names.NewMachineTag("1"),
				Provider:   "dummy",
				InstanceId: "already-provisioned-1",
				ReadOnly:   true,
			},
		},
	}}})
	c.Assert(filesystemSource.createFilesystemsArgs, jc.DeepEquals, [][]storage.FilesystemParams{{{
		Tag:          names.NewFilesystemTag("1"),
		Size:         1024,
		Provider:     "dummy",
		ResourceTags: map[string]string{"very": "fancy"},
	}}})
}

// TestSetVolumeInfoErrorStopsWorker checks that a hard (API-level) error
// from SetVolumeInfo terminates the worker with a descriptive error.
func (s *storageProvisionerSuite) TestSetVolumeInfoErrorStopsWorker(c *gc.C) {
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
		return nil, errors.New("belly up")
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	done := make(chan interface{})
	go func() {
		defer close(done)
		err := worker.Wait()
		c.Assert(err, gc.ErrorMatches, "creating volumes: publishing volumes to state: belly up")
	}()

	args.volumes.volumesWatcher.changes <- []string{"1"}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, done, "waiting for worker to exit")
}

// TestSetVolumeInfoErrorResultDoesNotStopWorker checks that a per-entity
// error result (as opposed to a call-level error) does NOT kill the worker.
func (s *storageProvisionerSuite) TestSetVolumeInfoErrorResultDoesNotStopWorker(c *gc.C) {
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
		return []params.ErrorResult{{Error: &params.Error{Message: "message", Code: "code"}}}, nil
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer func() {
		err := worker.Wait()
		c.Assert(err, jc.ErrorIsNil)
	}()
	defer worker.Kill()

	done := make(chan interface{})
	go func() {
		defer close(done)
		worker.Wait()
	}()

	args.volumes.volumesWatcher.changes <- []string{"1"}
	args.environ.watcher.changes <- struct{}{}
	// The worker must still be running after processing the error result.
	assertNoEvent(c, done, "worker exited")
}

// TestDetachVolumesUnattached checks that a dying attachment for an
// unprovisioned volume is removed directly, with no detach call needed.
func (s *storageProvisionerSuite) TestDetachVolumesUnattached(c *gc.C) {
	removed := make(chan interface{})
	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
		defer close(removed)
		c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{
			MachineTag:    "machine-0",
			AttachmentTag: "volume-0",
		}})
		return make([]params.ErrorResult, len(ids)), nil
	}

	args := &workerArgs{
		life: &mockLifecycleManager{removeAttachments: removeAttachments},
	}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	args.volumes.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag: "machine-0", AttachmentTag: "volume-0",
	}}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, removed, "waiting for attachment to be removed")
}

// TestDetachVolumes drives a full attach-then-detach cycle: the first
// watcher event attaches (life=Alive), the second — with `attached` now
// true, so life reports Dying — detaches via the provider and removes the
// attachment record.
func (s *storageProvisionerSuite) TestDetachVolumes(c *gc.C) {
	var attached bool
	volumeAttachmentInfoSet := make(chan interface{})
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) {
		close(volumeAttachmentInfoSet)
		attached = true
		for _, a := range volumeAttachments {
			id := params.MachineStorageId{
				MachineTag:    a.MachineTag,
				AttachmentTag: a.VolumeTag,
			}
			volumeAccessor.provisionedAttachments[id] = a
		}
		return make([]params.ErrorResult, len(volumeAttachments)), nil
	}

	expectedAttachmentIds := []params.MachineStorageId{{
		MachineTag: "machine-1", AttachmentTag: "volume-1",
	}}

	attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) {
		c.Assert(ids, gc.DeepEquals, expectedAttachmentIds)
		life := params.Alive
		if attached {
			life = params.Dying
		}
		return []params.LifeResult{{Life: life}}, nil
	}

	detached := make(chan interface{})
	s.provider.detachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]error, error) {
		c.Assert(args, gc.HasLen, 1)
		c.Assert(args[0].Machine.String(), gc.Equals, expectedAttachmentIds[0].MachineTag)
		c.Assert(args[0].Volume.String(), gc.Equals, expectedAttachmentIds[0].AttachmentTag)
		defer close(detached)
		return make([]error, len(args)), nil
	}

	removed := make(chan interface{})
	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
		c.Assert(ids, gc.DeepEquals, expectedAttachmentIds)
		close(removed)
		return make([]params.ErrorResult, len(ids)), nil
	}

	// volume-1 and machine-1 are provisioned.
	volumeAccessor.provisionedVolumes["volume-1"] = params.Volume{
		VolumeTag: "volume-1",
		Info: params.VolumeInfo{
			VolumeId: "vol-123",
		},
	}
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")

	args := &workerArgs{
		volumes: volumeAccessor,
		life: &mockLifecycleManager{
			attachmentLife:    attachmentLife,
			removeAttachments: removeAttachments,
		},
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag: "machine-1", AttachmentTag: "volume-1",
	}}
	volumeAccessor.volumesWatcher.changes <- []string{"1"}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set")
	volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag: "machine-1", AttachmentTag: "volume-1",
	}}
	waitChannel(c, detached, "waiting for volume to be detached")
	waitChannel(c, removed, "waiting for attachment to be removed")
}

// TestDetachVolumesRetry checks the exponential-backoff retry schedule for
// failed detachments (30s doubling up to a 30m ceiling) and the status
// updates reported on each attempt.
func (s *storageProvisionerSuite) TestDetachVolumesRetry(c *gc.C) {
	machine := names.NewMachineTag("1")
	volume := names.NewVolumeTag("1")
	attachmentId := params.MachineStorageId{
		MachineTag: machine.String(), AttachmentTag: volume.String(),
	}
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedAttachments[attachmentId] = params.VolumeAttachment{
		MachineTag: machine.String(),
		VolumeTag:  volume.String(),
	}
	volumeAccessor.provisionedVolumes[volume.String()] = params.Volume{
		VolumeTag: volume.String(),
		Info: params.VolumeInfo{
			VolumeId: "vol-123",
		},
	}
	volumeAccessor.provisionedMachines[machine.String()] = instance.Id("already-provisioned-1")

	attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) {
		return []params.LifeResult{{Life: params.Dying}}, nil
	}

	// mockFunc's After will progress the current time by the specified
	// duration and signal the channel immediately.
	clock := &mockClock{}
	var detachVolumeTimes []time.Time

	// Fail the first nine attempts so the worker must retry; record the
	// (mock) clock time of each attempt.
	s.provider.detachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]error, error) {
		detachVolumeTimes = append(detachVolumeTimes, clock.Now())
		if len(detachVolumeTimes) < 10 {
			return []error{errors.New("badness")}, nil
		}
		return []error{nil}, nil
	}

	removed := make(chan interface{})
	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
		close(removed)
		return make([]params.ErrorResult, len(ids)), nil
	}

	args := &workerArgs{
		volumes: volumeAccessor,
		clock:   clock,
		life: &mockLifecycleManager{
			attachmentLife:    attachmentLife,
			removeAttachments: removeAttachments,
		},
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	volumeAccessor.volumesWatcher.changes <- []string{volume.Id()}
	args.environ.watcher.changes <- struct{}{}
	volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{attachmentId}
	waitChannel(c, removed, "waiting for attachment to be removed")
	c.Assert(detachVolumeTimes, gc.HasLen, 10)

	// The first attempt should have been immediate: T0.
	c.Assert(detachVolumeTimes[0], gc.Equals, time.Time{})

	delays := make([]time.Duration, len(detachVolumeTimes)-1)
	for i := range detachVolumeTimes[1:] {
		delays[i] = detachVolumeTimes[i+1].Sub(detachVolumeTimes[i])
	}
	c.Assert(delays, jc.DeepEquals, []time.Duration{
		30 * time.Second,
		1 * time.Minute,
		2 * time.Minute,
		4 * time.Minute,
		8 * time.Minute,
		16 * time.Minute,
		30 * time.Minute, // ceiling reached
		30 * time.Minute,
		30 * time.Minute,
	})

	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
		{Tag: "volume-1", Status: "detaching", Info: "badness"}, // DetachVolumes
		{Tag: "volume-1", Status: "detaching", Info: "badness"},
		{Tag: "volume-1", Status: "detaching", Info: "badness"},
		{Tag: "volume-1", Status: "detaching", Info: "badness"},
		{Tag: "volume-1", Status: "detaching", Info: "badness"},
		{Tag: "volume-1", Status: "detaching", Info: "badness"},
		{Tag: "volume-1", Status: "detaching", Info: "badness"},
		{Tag: "volume-1", Status: "detaching", Info: "badness"},
		{Tag: "volume-1", Status: "detaching", Info: "badness"},
		{Tag: "volume-1", Status: "detached", Info: ""},
	})
}

// TestDetachFilesystemsUnattached is the filesystem analogue of
// TestDetachVolumesUnattached: an unprovisioned attachment is removed
// without any provider detach call.
func (s *storageProvisionerSuite) TestDetachFilesystemsUnattached(c *gc.C) {
	removed := make(chan interface{})
	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
		defer close(removed)
		c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{
			MachineTag:    "machine-0",
			AttachmentTag: "filesystem-0",
		}})
		return make([]params.ErrorResult, len(ids)), nil
	}

	args := &workerArgs{
		life: &mockLifecycleManager{removeAttachments: removeAttachments},
	}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	args.filesystems.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag: "machine-0", AttachmentTag: "filesystem-0",
	}}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, removed, "waiting for attachment to be removed")
}

// TestDetachFilesystems mirrors TestDetachVolumes for filesystems:
// attach on the first watcher event, then detach and remove on the second
// once life reports Dying.
func (s *storageProvisionerSuite) TestDetachFilesystems(c *gc.C) {
	var attached bool
	filesystemAttachmentInfoSet := make(chan interface{})
	filesystemAccessor := newMockFilesystemAccessor()
	filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) {
		close(filesystemAttachmentInfoSet)
		attached = true
		for _, a := range filesystemAttachments {
			id := params.MachineStorageId{
				MachineTag:    a.MachineTag,
				AttachmentTag: a.FilesystemTag,
			}
			filesystemAccessor.provisionedAttachments[id] = a
		}
		return make([]params.ErrorResult, len(filesystemAttachments)), nil
	}

	expectedAttachmentIds := []params.MachineStorageId{{
		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
	}}

	attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) {
		c.Assert(ids, gc.DeepEquals, expectedAttachmentIds)
		life := params.Alive
		if attached {
			life = params.Dying
		}
		return []params.LifeResult{{Life: life}}, nil
	}

	detached := make(chan interface{})
	s.provider.detachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) ([]error, error) {
		c.Assert(args, gc.HasLen, 1)
		c.Assert(args[0].Machine.String(), gc.Equals, expectedAttachmentIds[0].MachineTag)
		c.Assert(args[0].Filesystem.String(), gc.Equals, expectedAttachmentIds[0].AttachmentTag)
		defer close(detached)
		return make([]error, len(args)), nil
	}

	removed := make(chan interface{})
	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
		c.Assert(ids, gc.DeepEquals, expectedAttachmentIds)
		close(removed)
		return make([]params.ErrorResult, len(ids)), nil
	}

	// filesystem-1 and machine-1 are provisioned.
	filesystemAccessor.provisionedFilesystems["filesystem-1"] = params.Filesystem{
		FilesystemTag: "filesystem-1",
		Info: params.FilesystemInfo{
			FilesystemId: "fs-id",
		},
	}
	filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")

	args := &workerArgs{
		filesystems: filesystemAccessor,
		life: &mockLifecycleManager{
			attachmentLife:    attachmentLife,
			removeAttachments: removeAttachments,
		},
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
	}}
	filesystemAccessor.filesystemsWatcher.changes <- []string{"1"}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set")
	filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag: "machine-1", AttachmentTag: "filesystem-1",
	}}
	waitChannel(c, detached, "waiting for filesystem to be detached")
	waitChannel(c, removed, "waiting for attachment to be removed")
}

// TestDestroyVolumes checks that Dead volumes are removed from state, and
// that a provisioned volume is destroyed via the provider first while an
// unprovisioned one is removed directly.
func (s *storageProvisionerSuite) TestDestroyVolumes(c *gc.C) {
	provisionedVolume := names.NewVolumeTag("1")
	unprovisionedVolume := names.NewVolumeTag("2")

	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionVolume(provisionedVolume)

	life := func(tags []names.Tag) ([]params.LifeResult, error) {
		results := make([]params.LifeResult, len(tags))
		for i := range results {
			results[i].Life = params.Dead
		}
		return results, nil
	}

	destroyedChan := make(chan interface{}, 1)
	s.provider.destroyVolumesFunc = func(volumeIds []string) ([]error, error) {
		destroyedChan <- volumeIds
		return make([]error, len(volumeIds)), nil
	}

	removedChan := make(chan interface{}, 1)
	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
		removedChan <- tags
		return make([]params.ErrorResult, len(tags)), nil
	}

	args := &workerArgs{
		volumes: volumeAccessor,
		life: &mockLifecycleManager{
			life:   life,
			remove: remove,
		},
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	volumeAccessor.volumesWatcher.changes <- []string{
		provisionedVolume.Id(),
		unprovisionedVolume.Id(),
	}
	args.environ.watcher.changes <- struct{}{}

	// Both volumes should be removed; the provisioned one
	// should be deprovisioned first.

	destroyed := waitChannel(c, destroyedChan, "waiting for volume to be deprovisioned")
	assertNoEvent(c, destroyedChan, "volumes deprovisioned")
	c.Assert(destroyed, jc.DeepEquals, []string{"vol-1"})

	// Removals may arrive in one or two batches; collect until both seen.
	var removed []names.Tag
	for len(removed) < 2 {
		tags := waitChannel(c, removedChan, "waiting for volumes to be removed").([]names.Tag)
		removed = append(removed, tags...)
	}
	c.Assert(removed, jc.SameContents, []names.Tag{provisionedVolume, unprovisionedVolume})
	assertNoEvent(c, removedChan, "volumes removed")
}

// TestDestroyVolumesRetry checks the exponential-backoff retry schedule
// for failed volume destruction, mirroring TestDetachVolumesRetry.
func (s *storageProvisionerSuite) TestDestroyVolumesRetry(c *gc.C) {
	volume := names.NewVolumeTag("1")
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionVolume(volume)

	life := func(tags []names.Tag) ([]params.LifeResult, error) {
		return []params.LifeResult{{Life: params.Dead}}, nil
	}

	// mockFunc's After will progress the current time by the specified
	// duration and signal the channel immediately.
	clock := &mockClock{}
	var destroyVolumeTimes []time.Time

	// Fail the first nine attempts, recording the clock time of each.
	s.provider.destroyVolumesFunc = func(volumeIds []string) ([]error, error) {
		destroyVolumeTimes = append(destroyVolumeTimes, clock.Now())
		if len(destroyVolumeTimes) < 10 {
			return []error{errors.New("badness")}, nil
		}
		return []error{nil}, nil
	}

	removedChan := make(chan interface{}, 1)
	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
		removedChan <- tags
		return make([]params.ErrorResult, len(tags)), nil
	}

	args := &workerArgs{
		volumes: volumeAccessor,
		clock:   clock,
		life: &mockLifecycleManager{
			life:   life,
			remove: remove,
		},
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	volumeAccessor.volumesWatcher.changes <- []string{volume.Id()}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, removedChan, "waiting for volume to be removed")
	c.Assert(destroyVolumeTimes, gc.HasLen, 10)

	// The first attempt should have been immediate: T0.
	c.Assert(destroyVolumeTimes[0], gc.Equals, time.Time{})

	delays := make([]time.Duration, len(destroyVolumeTimes)-1)
	for i := range destroyVolumeTimes[1:] {
		delays[i] = destroyVolumeTimes[i+1].Sub(destroyVolumeTimes[i])
	}
	c.Assert(delays, jc.DeepEquals, []time.Duration{
		30 * time.Second,
		1 * time.Minute,
		2 * time.Minute,
		4 * time.Minute,
		8 * time.Minute,
		16 * time.Minute,
		30 * time.Minute, // ceiling reached
		30 * time.Minute,
		30 * time.Minute,
	})

	c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
		{Tag: "volume-1", Status: "destroying", Info: "badness"},
	})
}

// TestDestroyFilesystems checks that Dead filesystems are removed from
// state; note that provider-side deprovisioning is not exercised (see the
// in-test comment).
func (s *storageProvisionerSuite) TestDestroyFilesystems(c *gc.C) {
	provisionedFilesystem := names.NewFilesystemTag("1")
	unprovisionedFilesystem := names.NewFilesystemTag("2")

	filesystemAccessor := newMockFilesystemAccessor()
	filesystemAccessor.provisionFilesystem(provisionedFilesystem)

	life := func(tags []names.Tag) ([]params.LifeResult, error) {
		results := make([]params.LifeResult, len(tags))
		for i := range results {
			results[i].Life = params.Dead
		}
		return results, nil
	}

	removedChan := make(chan interface{}, 1)
	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
		removedChan <- tags
		return make([]params.ErrorResult, len(tags)), nil
	}

	args := &workerArgs{
		filesystems: filesystemAccessor,
		life: &mockLifecycleManager{
			life:   life,
			remove: remove,
		},
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	filesystemAccessor.filesystemsWatcher.changes <- []string{
		provisionedFilesystem.Id(),
		unprovisionedFilesystem.Id(),
	}
	args.environ.watcher.changes <- struct{}{}

	// Both filesystems should be removed; the provisioned one
	// *should* be deprovisioned first, but we don't currently
	// have the ability to do so via the storage provider API.

	var removed []names.Tag
	for len(removed) < 2 {
		tags := waitChannel(c, removedChan, "waiting for filesystems to be removed").([]names.Tag)
		removed = append(removed, tags...)
	}
	c.Assert(removed, jc.SameContents, []names.Tag{provisionedFilesystem, unprovisionedFilesystem})
	assertNoEvent(c, removedChan, "filesystems removed")
}

// newStorageProvisioner constructs a storage provisioner worker from args,
// filling in sensible mock defaults for any nil field. args is mutated in
// place so tests can reach the defaults afterwards (e.g. args.environ).
func newStorageProvisioner(c *gc.C, args *workerArgs) worker.Worker {
	if args == nil {
		args = &workerArgs{}
	}
	if args.scope == nil {
		args.scope = coretesting.EnvironmentTag
	}
	if args.volumes == nil {
		args.volumes = newMockVolumeAccessor()
	}
	if args.filesystems == nil {
		args.filesystems = newMockFilesystemAccessor()
	}
	if args.life == nil {
		args.life = &mockLifecycleManager{}
	}
	if args.environ == nil {
		args.environ = newMockEnvironAccessor(c)
	}
	if args.machines == nil {
		args.machines = newMockMachineAccessor(c)
	}
	if args.clock == nil {
		args.clock = &mockClock{}
	}
	if args.statusSetter == nil {
		args.statusSetter = &mockStatusSetter{}
	}
	return storageprovisioner.NewStorageProvisioner(
		args.scope,
		"storage-dir",
		args.volumes,
		args.filesystems,
		args.life,
		args.environ,
		args.machines,
		args.statusSetter,
		args.clock,
	)
}

// workerArgs bundles the dependencies of NewStorageProvisioner so tests
// can override just the collaborators they care about.
type workerArgs struct {
	scope        names.Tag
	volumes      *mockVolumeAccessor
	filesystems  *mockFilesystemAccessor
	life         *mockLifecycleManager
	environ      *mockEnvironAccessor
	machines     *mockMachineAccessor
	clock        clock.Clock
	statusSetter *mockStatusSetter
}

// waitChannel receives a value from ch, failing the test if nothing
// arrives within coretesting.LongWait. activity describes what is being
// waited for, for the failure message.
func waitChannel(c *gc.C, ch <-chan interface{}, activity string) interface{} {
	select {
	case v := <-ch:
		return v
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out " + activity)
		panic("unreachable")
	}
}

// assertNoEvent fails the test if anything arrives on ch within
// coretesting.ShortWait. event describes the unexpected event.
func assertNoEvent(c *gc.C, ch <-chan interface{}, event string) {
	select {
	case <-ch:
		c.Fatalf("unexpected " + event)
	case <-time.After(coretesting.ShortWait):
	}
}