github.com/niedbalski/juju@v0.0.0-20190215020005-8ff100488e47/worker/storageprovisioner/storageprovisioner_test.go (about) 1 // Copyright 2015 Canonical Ltd. 2 // Licensed under the AGPLv3, see LICENCE file for details. 3 4 package storageprovisioner_test 5 6 import ( 7 "time" 8 9 "github.com/juju/clock" 10 "github.com/juju/errors" 11 jc "github.com/juju/testing/checkers" 12 gc "gopkg.in/check.v1" 13 "gopkg.in/juju/names.v2" 14 "gopkg.in/juju/worker.v1" 15 "gopkg.in/juju/worker.v1/workertest" 16 17 "github.com/juju/juju/apiserver/params" 18 "github.com/juju/juju/core/instance" 19 "github.com/juju/juju/core/watcher" 20 "github.com/juju/juju/environs/context" 21 "github.com/juju/juju/storage" 22 coretesting "github.com/juju/juju/testing" 23 "github.com/juju/juju/worker/storageprovisioner" 24 ) 25 26 type storageProvisionerSuite struct { 27 coretesting.BaseSuite 28 provider *dummyProvider 29 registry storage.ProviderRegistry 30 managedFilesystemSource *mockManagedFilesystemSource 31 } 32 33 var _ = gc.Suite(&storageProvisionerSuite{}) 34 35 func (s *storageProvisionerSuite) SetUpTest(c *gc.C) { 36 s.BaseSuite.SetUpTest(c) 37 s.provider = &dummyProvider{dynamic: true} 38 s.registry = storage.StaticProviderRegistry{ 39 map[storage.ProviderType]storage.Provider{ 40 "dummy": s.provider, 41 }, 42 } 43 44 s.managedFilesystemSource = nil 45 s.PatchValue( 46 storageprovisioner.NewManagedFilesystemSource, 47 func( 48 blockDevices map[names.VolumeTag]storage.BlockDevice, 49 filesystems map[names.FilesystemTag]storage.Filesystem, 50 ) storage.FilesystemSource { 51 s.managedFilesystemSource = &mockManagedFilesystemSource{ 52 blockDevices: blockDevices, 53 filesystems: filesystems, 54 } 55 return s.managedFilesystemSource 56 }, 57 ) 58 } 59 60 func (s *storageProvisionerSuite) TestStartStop(c *gc.C) { 61 worker, err := storageprovisioner.NewStorageProvisioner(storageprovisioner.Config{ 62 Scope: coretesting.ModelTag, 63 Volumes: newMockVolumeAccessor(), 64 Filesystems: newMockFilesystemAccessor(), 65 Life: &mockLifecycleManager{}, 66 Registry: s.registry, 67 Machines: newMockMachineAccessor(c), 68 Status: &mockStatusSetter{}, 69 Clock: &mockClock{}, 70 CloudCallContext: context.NewCloudCallContext(), 71 }) 72 c.Assert(err, jc.ErrorIsNil) 73 74 worker.Kill() 75 c.Assert(worker.Wait(), gc.IsNil) 76 } 77 78 func (s *storageProvisionerSuite) TestInvalidConfig(c *gc.C) { 79 _, err := storageprovisioner.NewStorageProvisioner(almostValidConfig()) 80 c.Check(err, jc.Satisfies, errors.IsNotValid) 81 } 82 83 func (s *storageProvisionerSuite) TestVolumeAdded(c *gc.C) { 84 expectedVolumes := []params.Volume{{ 85 VolumeTag: "volume-1", 86 Info: params.VolumeInfo{ 87 VolumeId: "id-1", 88 HardwareId: "serial-1", 89 Size: 1024, 90 Persistent: true, 91 }, 92 }, { 93 VolumeTag: "volume-2", 94 Info: params.VolumeInfo{ 95 VolumeId: "id-2", 96 HardwareId: "serial-2", 97 Size: 1024, 98 }, 99 }} 100 expectedVolumeAttachments := []params.VolumeAttachment{{ 101 VolumeTag: "volume-1", 102 MachineTag: "machine-1", 103 Info: params.VolumeAttachmentInfo{ 104 DeviceName: "/dev/sda1", 105 ReadOnly: true, 106 }, 107 }, { 108 VolumeTag: "volume-2", 109 MachineTag: "machine-1", 110 Info: params.VolumeAttachmentInfo{ 111 DeviceName: "/dev/sda2", 112 }, 113 }} 114 115 volumeInfoSet := make(chan interface{}) 116 volumeAccessor := newMockVolumeAccessor() 117 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 118 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 119 defer close(volumeInfoSet) 120 c.Assert(volumes, jc.SameContents, expectedVolumes) 121 return nil, nil 122 } 123 124 volumeAttachmentInfoSet := make(chan interface{}) 125 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 126 defer close(volumeAttachmentInfoSet) 127 c.Assert(volumeAttachments, jc.SameContents, expectedVolumeAttachments) 128 return nil, nil 129 } 130 volumeAttachmentPlansCreate := make(chan interface{}) 131 volumeAccessor.createVolumeAttachmentPlans = func(volumeAttachmentPlans []params.VolumeAttachmentPlan) ([]params.ErrorResult, error) { 132 defer close(volumeAttachmentPlansCreate) 133 return make([]params.ErrorResult, len(volumeAttachmentPlans)), nil 134 } 135 136 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 137 worker := newStorageProvisioner(c, args) 138 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 139 defer worker.Kill() 140 141 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 142 MachineTag: "machine-1", AttachmentTag: "volume-1", 143 }, { 144 MachineTag: "machine-1", AttachmentTag: "volume-2", 145 }} 146 assertNoEvent(c, volumeAttachmentPlansCreate, "volume attachment plans set") 147 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment set") 148 // The worker should create volumes according to ids "1" and "2". 149 volumeAccessor.volumesWatcher.changes <- []string{"1", "2"} 150 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 151 waitChannel(c, volumeAttachmentPlansCreate, "waiting for volume attachment plans to be set") 152 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 153 } 154 155 func (s *storageProvisionerSuite) TestCreateVolumeCreatesAttachment(c *gc.C) { 156 volumeAccessor := newMockVolumeAccessor() 157 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 158 159 volumeAttachmentInfoSet := make(chan interface{}) 160 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 161 defer close(volumeAttachmentInfoSet) 162 return make([]params.ErrorResult, len(volumeAttachments)), nil 163 } 164 volumeAttachmentPlansCreate := make(chan interface{}) 165 volumeAccessor.createVolumeAttachmentPlans = func(volumeAttachmentPlans []params.VolumeAttachmentPlan) ([]params.ErrorResult, error) { 166 defer close(volumeAttachmentPlansCreate) 167 return make([]params.ErrorResult, len(volumeAttachmentPlans)), nil 168 } 169 170 s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) { 171 volumeAccessor.provisionedAttachments[params.MachineStorageId{ 172 MachineTag: args[0].Attachment.Machine.String(), 173 AttachmentTag: args[0].Attachment.Volume.String(), 174 }] = params.VolumeAttachment{ 175 VolumeTag: args[0].Attachment.Volume.String(), 176 MachineTag: args[0].Attachment.Machine.String(), 177 } 178 return []storage.CreateVolumesResult{{ 179 Volume: &storage.Volume{ 180 Tag: args[0].Tag, 181 VolumeInfo: storage.VolumeInfo{ 182 VolumeId: "vol-ume", 183 }, 184 }, 185 VolumeAttachment: &storage.VolumeAttachment{ 186 Volume: args[0].Attachment.Volume, 187 Machine: args[0].Attachment.Machine, 188 }, 189 }}, nil 190 } 191 192 attachVolumesCalled := make(chan interface{}) 193 s.provider.attachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) { 194 defer close(attachVolumesCalled) 195 return nil, errors.New("should not be called") 196 } 197 198 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 199 worker := newStorageProvisioner(c, args) 200 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 201 defer worker.Kill() 202 203 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 204 MachineTag: "machine-1", AttachmentTag: "volume-1", 205 }} 206 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment set") 207 208 // The worker should create volumes according to ids "1". 209 volumeAccessor.volumesWatcher.changes <- []string{"1"} 210 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 211 assertNoEvent(c, attachVolumesCalled, "AttachVolumes called") 212 } 213 214 func (s *storageProvisionerSuite) TestCreateVolumeRetry(c *gc.C) { 215 volumeInfoSet := make(chan interface{}) 216 volumeAccessor := newMockVolumeAccessor() 217 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 218 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 219 defer close(volumeInfoSet) 220 return make([]params.ErrorResult, len(volumes)), nil 221 } 222 223 // mockFunc's After will progress the current time by the specified 224 // duration and signal the channel immediately. 225 clock := &mockClock{} 226 var createVolumeTimes []time.Time 227 228 s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) { 229 createVolumeTimes = append(createVolumeTimes, clock.Now()) 230 if len(createVolumeTimes) < 10 { 231 return []storage.CreateVolumesResult{{Error: errors.New("badness")}}, nil 232 } 233 return []storage.CreateVolumesResult{{ 234 Volume: &storage.Volume{Tag: args[0].Tag}, 235 }}, nil 236 } 237 238 args := &workerArgs{volumes: volumeAccessor, clock: clock, registry: s.registry} 239 worker := newStorageProvisioner(c, args) 240 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 241 defer worker.Kill() 242 243 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 244 MachineTag: "machine-1", AttachmentTag: "volume-1", 245 }} 246 volumeAccessor.volumesWatcher.changes <- []string{"1"} 247 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 248 c.Assert(createVolumeTimes, gc.HasLen, 10) 249 250 // The first attempt should have been immediate: T0. 251 c.Assert(createVolumeTimes[0], gc.Equals, time.Time{}) 252 253 delays := make([]time.Duration, len(createVolumeTimes)-1) 254 for i := range createVolumeTimes[1:] { 255 delays[i] = createVolumeTimes[i+1].Sub(createVolumeTimes[i]) 256 } 257 c.Assert(delays, jc.DeepEquals, []time.Duration{ 258 30 * time.Second, 259 1 * time.Minute, 260 2 * time.Minute, 261 4 * time.Minute, 262 8 * time.Minute, 263 16 * time.Minute, 264 30 * time.Minute, // ceiling reached 265 30 * time.Minute, 266 30 * time.Minute, 267 }) 268 269 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 270 {Tag: "volume-1", Status: "pending", Info: "badness"}, 271 {Tag: "volume-1", Status: "pending", Info: "badness"}, 272 {Tag: "volume-1", Status: "pending", Info: "badness"}, 273 {Tag: "volume-1", Status: "pending", Info: "badness"}, 274 {Tag: "volume-1", Status: "pending", Info: "badness"}, 275 {Tag: "volume-1", Status: "pending", Info: "badness"}, 276 {Tag: "volume-1", Status: "pending", Info: "badness"}, 277 {Tag: "volume-1", Status: "pending", Info: "badness"}, 278 {Tag: "volume-1", Status: "pending", Info: "badness"}, 279 {Tag: "volume-1", Status: "attaching", Info: ""}, 280 }) 281 } 282 283 func (s *storageProvisionerSuite) TestCreateFilesystemRetry(c *gc.C) { 284 filesystemInfoSet := make(chan interface{}) 285 filesystemAccessor := newMockFilesystemAccessor() 286 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 287 filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { 288 defer close(filesystemInfoSet) 289 return make([]params.ErrorResult, len(filesystems)), nil 290 } 291 292 // mockFunc's After will progress the current time by the specified 293 // duration and signal the channel immediately. 294 clock := &mockClock{} 295 var createFilesystemTimes []time.Time 296 297 s.provider.createFilesystemsFunc = func(args []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) { 298 createFilesystemTimes = append(createFilesystemTimes, clock.Now()) 299 if len(createFilesystemTimes) < 10 { 300 return []storage.CreateFilesystemsResult{{Error: errors.New("badness")}}, nil 301 } 302 return []storage.CreateFilesystemsResult{{ 303 Filesystem: &storage.Filesystem{Tag: args[0].Tag}, 304 }}, nil 305 } 306 307 args := &workerArgs{filesystems: filesystemAccessor, clock: clock, registry: s.registry} 308 worker := newStorageProvisioner(c, args) 309 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 310 defer worker.Kill() 311 312 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 313 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 314 }} 315 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 316 waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") 317 c.Assert(createFilesystemTimes, gc.HasLen, 10) 318 319 // The first attempt should have been immediate: T0. 320 c.Assert(createFilesystemTimes[0], gc.Equals, time.Time{}) 321 322 delays := make([]time.Duration, len(createFilesystemTimes)-1) 323 for i := range createFilesystemTimes[1:] { 324 delays[i] = createFilesystemTimes[i+1].Sub(createFilesystemTimes[i]) 325 } 326 c.Assert(delays, jc.DeepEquals, []time.Duration{ 327 30 * time.Second, 328 1 * time.Minute, 329 2 * time.Minute, 330 4 * time.Minute, 331 8 * time.Minute, 332 16 * time.Minute, 333 30 * time.Minute, // ceiling reached 334 30 * time.Minute, 335 30 * time.Minute, 336 }) 337 338 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 339 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 340 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 341 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 342 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 343 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 344 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 345 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 346 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 347 {Tag: "filesystem-1", Status: "pending", Info: "badness"}, 348 {Tag: "filesystem-1", Status: "attaching", Info: ""}, 349 }) 350 } 351 352 func (s *storageProvisionerSuite) TestAttachVolumeRetry(c *gc.C) { 353 volumeInfoSet := make(chan interface{}) 354 volumeAccessor := newMockVolumeAccessor() 355 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 356 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 357 defer close(volumeInfoSet) 358 return make([]params.ErrorResult, len(volumes)), nil 359 } 360 volumeAttachmentInfoSet := make(chan interface{}) 361 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 362 defer close(volumeAttachmentInfoSet) 363 return make([]params.ErrorResult, len(volumeAttachments)), nil 364 } 365 366 // mockFunc's After will progress the current time by the specified 367 // duration and signal the channel immediately. 368 clock := &mockClock{} 369 var attachVolumeTimes []time.Time 370 371 s.provider.attachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) { 372 attachVolumeTimes = append(attachVolumeTimes, clock.Now()) 373 if len(attachVolumeTimes) < 10 { 374 return []storage.AttachVolumesResult{{Error: errors.New("badness")}}, nil 375 } 376 return []storage.AttachVolumesResult{{ 377 VolumeAttachment: &storage.VolumeAttachment{ 378 args[0].Volume, 379 args[0].Machine, 380 storage.VolumeAttachmentInfo{ 381 DeviceName: "/dev/sda1", 382 }, 383 }, 384 }}, nil 385 } 386 387 args := &workerArgs{volumes: volumeAccessor, clock: clock, registry: s.registry} 388 worker := newStorageProvisioner(c, args) 389 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 390 defer worker.Kill() 391 392 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 393 MachineTag: "machine-1", AttachmentTag: "volume-1", 394 }} 395 volumeAccessor.volumesWatcher.changes <- []string{"1"} 396 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 397 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 398 c.Assert(attachVolumeTimes, gc.HasLen, 10) 399 400 // The first attempt should have been immediate: T0. 401 c.Assert(attachVolumeTimes[0], gc.Equals, time.Time{}) 402 403 delays := make([]time.Duration, len(attachVolumeTimes)-1) 404 for i := range attachVolumeTimes[1:] { 405 delays[i] = attachVolumeTimes[i+1].Sub(attachVolumeTimes[i]) 406 } 407 c.Assert(delays, jc.DeepEquals, []time.Duration{ 408 30 * time.Second, 409 1 * time.Minute, 410 2 * time.Minute, 411 4 * time.Minute, 412 8 * time.Minute, 413 16 * time.Minute, 414 30 * time.Minute, // ceiling reached 415 30 * time.Minute, 416 30 * time.Minute, 417 }) 418 419 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 420 {Tag: "volume-1", Status: "attaching", Info: ""}, // CreateVolumes 421 {Tag: "volume-1", Status: "attaching", Info: "badness"}, // AttachVolumes 422 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 423 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 424 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 425 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 426 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 427 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 428 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 429 {Tag: "volume-1", Status: "attaching", Info: "badness"}, 430 {Tag: "volume-1", Status: "attached", Info: ""}, 431 }) 432 } 433 434 func (s *storageProvisionerSuite) TestAttachFilesystemRetry(c *gc.C) { 435 filesystemInfoSet := make(chan interface{}) 436 filesystemAccessor := newMockFilesystemAccessor() 437 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 438 filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { 439 defer close(filesystemInfoSet) 440 return make([]params.ErrorResult, len(filesystems)), nil 441 } 442 filesystemAttachmentInfoSet := make(chan interface{}) 443 filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) { 444 defer close(filesystemAttachmentInfoSet) 445 return make([]params.ErrorResult, len(filesystemAttachments)), nil 446 } 447 448 // mockFunc's After will progress the current time by the specified 449 // duration and signal the channel immediately. 450 clock := &mockClock{} 451 var attachFilesystemTimes []time.Time 452 453 s.provider.attachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) ([]storage.AttachFilesystemsResult, error) { 454 attachFilesystemTimes = append(attachFilesystemTimes, clock.Now()) 455 if len(attachFilesystemTimes) < 10 { 456 return []storage.AttachFilesystemsResult{{Error: errors.New("badness")}}, nil 457 } 458 return []storage.AttachFilesystemsResult{{ 459 FilesystemAttachment: &storage.FilesystemAttachment{ 460 args[0].Filesystem, 461 args[0].Machine, 462 storage.FilesystemAttachmentInfo{ 463 Path: "/oh/over/there", 464 }, 465 }, 466 }}, nil 467 } 468 469 args := &workerArgs{filesystems: filesystemAccessor, clock: clock, registry: s.registry} 470 worker := newStorageProvisioner(c, args) 471 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 472 defer worker.Kill() 473 474 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 475 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 476 }} 477 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 478 waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") 479 waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set") 480 c.Assert(attachFilesystemTimes, gc.HasLen, 10) 481 482 // The first attempt should have been immediate: T0. 483 c.Assert(attachFilesystemTimes[0], gc.Equals, time.Time{}) 484 485 delays := make([]time.Duration, len(attachFilesystemTimes)-1) 486 for i := range attachFilesystemTimes[1:] { 487 delays[i] = attachFilesystemTimes[i+1].Sub(attachFilesystemTimes[i]) 488 } 489 c.Assert(delays, jc.DeepEquals, []time.Duration{ 490 30 * time.Second, 491 1 * time.Minute, 492 2 * time.Minute, 493 4 * time.Minute, 494 8 * time.Minute, 495 16 * time.Minute, 496 30 * time.Minute, // ceiling reached 497 30 * time.Minute, 498 30 * time.Minute, 499 }) 500 501 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 502 {Tag: "filesystem-1", Status: "attaching", Info: ""}, // CreateFilesystems 503 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, // AttachFilesystems 504 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 505 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 506 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 507 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 508 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 509 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 510 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 511 {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, 512 {Tag: "filesystem-1", Status: "attached", Info: ""}, 513 }) 514 } 515 516 func (s *storageProvisionerSuite) TestValidateVolumeParams(c *gc.C) { 517 volumeAccessor := newMockVolumeAccessor() 518 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 519 volumeAccessor.provisionedVolumes["volume-3"] = params.Volume{ 520 VolumeTag: "volume-3", 521 Info: params.VolumeInfo{VolumeId: "vol-ume"}, 522 } 523 524 var validateCalls int 525 validated := make(chan interface{}, 1) 526 s.provider.validateVolumeParamsFunc = func(p storage.VolumeParams) error { 527 validateCalls++ 528 validated <- p 529 switch p.Tag.String() { 530 case "volume-1", "volume-3": 531 return errors.New("something is wrong") 532 } 533 return nil 534 } 535 536 life := func(tags []names.Tag) ([]params.LifeResult, error) { 537 results := make([]params.LifeResult, len(tags)) 538 for i := range results { 539 switch tags[i].String() { 540 case "volume-3": 541 results[i].Life = params.Dead 542 default: 543 results[i].Life = params.Alive 544 } 545 } 546 return results, nil 547 } 548 549 createdVolumes := make(chan interface{}, 1) 550 s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) { 551 createdVolumes <- args 552 if len(args) != 1 { 553 return nil, errors.New("expected one argument") 554 } 555 return []storage.CreateVolumesResult{{ 556 Volume: &storage.Volume{Tag: args[0].Tag}, 557 }}, nil 558 } 559 560 destroyedVolumes := make(chan interface{}, 1) 561 s.provider.destroyVolumesFunc = func(volumeIds []string) ([]error, error) { 562 destroyedVolumes <- volumeIds 563 return make([]error, len(volumeIds)), nil 564 } 565 566 args := &workerArgs{ 567 volumes: volumeAccessor, 568 life: &mockLifecycleManager{ 569 life: life, 570 }, 571 registry: s.registry, 572 } 573 worker := newStorageProvisioner(c, args) 574 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 575 defer worker.Kill() 576 577 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 578 MachineTag: "machine-1", AttachmentTag: "volume-1", 579 }, { 580 MachineTag: "machine-1", AttachmentTag: "volume-2", 581 }} 582 volumeAccessor.volumesWatcher.changes <- []string{"1"} 583 waitChannel(c, validated, "waiting for volume parameter validation") 584 assertNoEvent(c, createdVolumes, "volume created") 585 c.Assert(validateCalls, gc.Equals, 1) 586 587 // Failure to create volume-1 should not block creation volume-2. 588 volumeAccessor.volumesWatcher.changes <- []string{"2"} 589 waitChannel(c, validated, "waiting for volume parameter validation") 590 createVolumeParams := waitChannel(c, createdVolumes, "volume created").([]storage.VolumeParams) 591 c.Assert(createVolumeParams, gc.HasLen, 1) 592 c.Assert(createVolumeParams[0].Tag.String(), gc.Equals, "volume-2") 593 c.Assert(validateCalls, gc.Equals, 2) 594 595 // destroying filesystems does not validate parameters 596 volumeAccessor.volumesWatcher.changes <- []string{"3"} 597 assertNoEvent(c, validated, "volume destruction params validated") 598 destroyVolumeParams := waitChannel(c, destroyedVolumes, "volume destroyed").([]string) 599 c.Assert(destroyVolumeParams, jc.DeepEquals, []string{"vol-ume"}) 600 c.Assert(validateCalls, gc.Equals, 2) // no change 601 602 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 603 {Tag: "volume-1", Status: "error", Info: "something is wrong"}, 604 {Tag: "volume-2", Status: "attaching"}, 605 // destroyed volumes are removed immediately, 606 // so there is no status update. 607 }) 608 } 609 610 func (s *storageProvisionerSuite) TestValidateFilesystemParams(c *gc.C) { 611 filesystemAccessor := newMockFilesystemAccessor() 612 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 613 filesystemAccessor.provisionedFilesystems["filesystem-3"] = params.Filesystem{ 614 FilesystemTag: "filesystem-3", 615 Info: params.FilesystemInfo{FilesystemId: "fs-id"}, 616 } 617 618 var validateCalls int 619 validated := make(chan interface{}, 1) 620 s.provider.validateFilesystemParamsFunc = func(p storage.FilesystemParams) error { 621 validateCalls++ 622 validated <- p 623 switch p.Tag.String() { 624 case "filesystem-1", "filesystem-3": 625 return errors.New("something is wrong") 626 } 627 return nil 628 } 629 630 life := func(tags []names.Tag) ([]params.LifeResult, error) { 631 results := make([]params.LifeResult, len(tags)) 632 for i := range results { 633 switch tags[i].String() { 634 case "filesystem-3": 635 results[i].Life = params.Dead 636 default: 637 results[i].Life = params.Alive 638 } 639 } 640 return results, nil 641 } 642 643 createdFilesystems := make(chan interface{}, 1) 644 s.provider.createFilesystemsFunc = func(args []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) { 645 createdFilesystems <- args 646 if len(args) != 1 { 647 return nil, errors.New("expected one argument") 648 } 649 return []storage.CreateFilesystemsResult{{ 650 Filesystem: &storage.Filesystem{Tag: args[0].Tag}, 651 }}, nil 652 } 653 654 destroyedFilesystems := make(chan interface{}, 1) 655 s.provider.destroyFilesystemsFunc = func(filesystemIds []string) ([]error, error) { 656 destroyedFilesystems <- filesystemIds 657 return make([]error, len(filesystemIds)), nil 658 } 659 660 args := &workerArgs{ 661 filesystems: filesystemAccessor, 662 life: &mockLifecycleManager{ 663 life: life, 664 }, 665 registry: s.registry, 666 } 667 worker := newStorageProvisioner(c, args) 668 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 669 defer worker.Kill() 670 671 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 672 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 673 }, { 674 MachineTag: "machine-1", AttachmentTag: "filesystem-2", 675 }} 676 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 677 waitChannel(c, validated, "waiting for filesystem parameter validation") 678 assertNoEvent(c, createdFilesystems, "filesystem created") 679 c.Assert(validateCalls, gc.Equals, 1) 680 681 // Failure to create filesystem-1 should not block creation filesystem-2. 682 filesystemAccessor.filesystemsWatcher.changes <- []string{"2"} 683 waitChannel(c, validated, "waiting for filesystem parameter validation") 684 createFilesystemParams := waitChannel(c, createdFilesystems, "filesystem created").([]storage.FilesystemParams) 685 c.Assert(createFilesystemParams, gc.HasLen, 1) 686 c.Assert(createFilesystemParams[0].Tag.String(), gc.Equals, "filesystem-2") 687 c.Assert(validateCalls, gc.Equals, 2) 688 689 // destroying filesystems does not validate parameters 690 filesystemAccessor.filesystemsWatcher.changes <- []string{"3"} 691 assertNoEvent(c, validated, "filesystem destruction params validated") 692 destroyFilesystemParams := waitChannel(c, destroyedFilesystems, "filesystem destroyed").([]string) 693 c.Assert(destroyFilesystemParams, jc.DeepEquals, []string{"fs-id"}) 694 c.Assert(validateCalls, gc.Equals, 2) // no change 695 696 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 697 {Tag: "filesystem-1", Status: "error", Info: "something is wrong"}, 698 {Tag: "filesystem-2", Status: "attaching"}, 699 // destroyed filesystems are removed immediately, 700 // so there is no status update. 701 }) 702 } 703 704 func (s *storageProvisionerSuite) TestFilesystemAdded(c *gc.C) { 705 expectedFilesystems := []params.Filesystem{{ 706 FilesystemTag: "filesystem-1", 707 Info: params.FilesystemInfo{ 708 FilesystemId: "id-1", 709 Size: 1024, 710 }, 711 }, { 712 FilesystemTag: "filesystem-2", 713 Info: params.FilesystemInfo{ 714 FilesystemId: "id-2", 715 Size: 1024, 716 }, 717 }} 718 719 filesystemInfoSet := make(chan interface{}) 720 filesystemAccessor := newMockFilesystemAccessor() 721 filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { 722 defer close(filesystemInfoSet) 723 c.Assert(filesystems, jc.SameContents, expectedFilesystems) 724 return nil, nil 725 } 726 727 args := &workerArgs{filesystems: filesystemAccessor, registry: s.registry} 728 worker := newStorageProvisioner(c, args) 729 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 730 defer worker.Kill() 731 732 // The worker should create filesystems according to ids "1" and "2". 733 filesystemAccessor.filesystemsWatcher.changes <- []string{"1", "2"} 734 waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") 735 } 736 737 func (s *storageProvisionerSuite) TestVolumeNeedsInstance(c *gc.C) { 738 volumeInfoSet := make(chan interface{}) 739 volumeAccessor := newMockVolumeAccessor() 740 volumeAccessor.setVolumeInfo = func([]params.Volume) ([]params.ErrorResult, error) { 741 defer close(volumeInfoSet) 742 return nil, nil 743 } 744 volumeAccessor.setVolumeAttachmentInfo = func([]params.VolumeAttachment) ([]params.ErrorResult, error) { 745 return nil, nil 746 } 747 748 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 749 worker := newStorageProvisioner(c, args) 750 defer worker.Wait() 751 defer worker.Kill() 752 753 volumeAccessor.volumesWatcher.changes <- []string{needsInstanceVolumeId} 754 assertNoEvent(c, volumeInfoSet, "volume info set") 755 args.machines.instanceIds[names.NewMachineTag("1")] = "inst-id" 756 args.machines.watcher.changes <- struct{}{} 757 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 758 } 759 760 // TestVolumeIncoherent tests that we do not panic when observing 761 // a pending volume that has no attachments. We send a volume 762 // update for a volume that is alive and unprovisioned, but has 763 // no machine attachment. Such volumes are ignored by the storage 764 // provisioner. 765 // 766 // See: https://bugs.launchpad.net/juju/+bug/1732616 767 func (s *storageProvisionerSuite) TestVolumeIncoherent(c *gc.C) { 768 volumeAccessor := newMockVolumeAccessor() 769 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 770 worker := newStorageProvisioner(c, args) 771 defer workertest.CleanKill(c, worker) 772 773 // Send 3 times, because the channel has a buffer size of 1. 774 // The third send guarantees we've sent at least the 2nd one 775 // through, which means at least the 1st has been processed 776 // (and ignored). 777 for i := 0; i < 3; i++ { 778 volumeAccessor.volumesWatcher.changes <- []string{noAttachmentVolumeId} 779 } 780 } 781 782 func (s *storageProvisionerSuite) TestVolumeNonDynamic(c *gc.C) { 783 volumeInfoSet := make(chan interface{}) 784 volumeAccessor := newMockVolumeAccessor() 785 volumeAccessor.setVolumeInfo = func([]params.Volume) ([]params.ErrorResult, error) { 786 defer close(volumeInfoSet) 787 return nil, nil 788 } 789 790 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 791 worker := newStorageProvisioner(c, args) 792 defer worker.Wait() 793 defer worker.Kill() 794 795 // Volumes for non-dynamic providers should not be created. 796 s.provider.dynamic = false 797 volumeAccessor.volumesWatcher.changes <- []string{"1"} 798 assertNoEvent(c, volumeInfoSet, "volume info set") 799 } 800 801 func (s *storageProvisionerSuite) TestVolumeAttachmentAdded(c *gc.C) { 802 // We should get two volume attachments: 803 // - volume-1 to machine-1, because the volume and 804 // machine are provisioned, but the attachment is not. 805 // - volume-1 to machine-0, because the volume, 806 // machine, and attachment are provisioned, but 807 // in a previous session, so a reattachment is 808 // requested. 809 expectedVolumeAttachments := []params.VolumeAttachment{{ 810 VolumeTag: "volume-1", 811 MachineTag: "machine-1", 812 Info: params.VolumeAttachmentInfo{ 813 DeviceName: "/dev/sda1", 814 ReadOnly: true, 815 }, 816 }, { 817 VolumeTag: "volume-1", 818 MachineTag: "machine-0", 819 Info: params.VolumeAttachmentInfo{ 820 DeviceName: "/dev/sda1", 821 ReadOnly: true, 822 }, 823 }} 824 825 var allVolumeAttachments []params.VolumeAttachment 826 volumeAttachmentInfoSet := make(chan interface{}) 827 volumeAccessor := newMockVolumeAccessor() 828 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 829 allVolumeAttachments = append(allVolumeAttachments, volumeAttachments...) 830 volumeAttachmentInfoSet <- nil 831 return make([]params.ErrorResult, len(volumeAttachments)), nil 832 } 833 834 // volume-1, machine-0, and machine-1 are provisioned. 835 volumeAccessor.provisionedVolumes["volume-1"] = params.Volume{ 836 VolumeTag: "volume-1", 837 Info: params.VolumeInfo{ 838 VolumeId: "vol-123", 839 }, 840 } 841 volumeAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0") 842 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 843 844 // machine-0/volume-1 attachment is already created. 845 // We should see a reattachment. 846 alreadyAttached := params.MachineStorageId{ 847 MachineTag: "machine-0", 848 AttachmentTag: "volume-1", 849 } 850 volumeAccessor.provisionedAttachments[alreadyAttached] = params.VolumeAttachment{ 851 MachineTag: "machine-0", 852 VolumeTag: "volume-1", 853 } 854 855 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 856 worker := newStorageProvisioner(c, args) 857 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 858 defer worker.Kill() 859 860 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 861 MachineTag: "machine-1", AttachmentTag: "volume-1", 862 }, { 863 MachineTag: "machine-1", AttachmentTag: "volume-2", 864 }, { 865 MachineTag: "machine-2", AttachmentTag: "volume-1", 866 }, { 867 MachineTag: "machine-0", AttachmentTag: "volume-1", 868 }} 869 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set") 870 volumeAccessor.volumesWatcher.changes <- []string{"1"} 871 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 872 c.Assert(allVolumeAttachments, jc.SameContents, expectedVolumeAttachments) 873 874 // Reattachment should only happen once per session. 875 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 876 MachineTag: "machine-0", 877 AttachmentTag: "volume-1", 878 }} 879 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set") 880 } 881 882 func (s *storageProvisionerSuite) TestVolumeAttachmentNoStaticReattachment(c *gc.C) { 883 // Static storage should never be reattached. 884 s.provider.dynamic = false 885 886 volumeAttachmentInfoSet := make(chan interface{}) 887 volumeAccessor := newMockVolumeAccessor() 888 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 889 volumeAttachmentInfoSet <- nil 890 return make([]params.ErrorResult, len(volumeAttachments)), nil 891 } 892 893 // volume-1, machine-0, and machine-1 are provisioned. 894 volumeAccessor.provisionedVolumes["volume-1"] = params.Volume{ 895 VolumeTag: "volume-1", 896 Info: params.VolumeInfo{ 897 VolumeId: "vol-123", 898 }, 899 } 900 volumeAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0") 901 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 902 903 alreadyAttached := params.MachineStorageId{ 904 MachineTag: "machine-0", 905 AttachmentTag: "volume-1", 906 } 907 volumeAccessor.provisionedAttachments[alreadyAttached] = params.VolumeAttachment{ 908 MachineTag: "machine-0", 909 VolumeTag: "volume-1", 910 } 911 912 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 913 worker := newStorageProvisioner(c, args) 914 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 915 defer worker.Kill() 916 917 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 918 MachineTag: "machine-0", AttachmentTag: "volume-1", 919 }} 920 volumeAccessor.volumesWatcher.changes <- []string{"1"} 921 assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set") 922 } 923 924 func (s *storageProvisionerSuite) TestFilesystemAttachmentAdded(c *gc.C) { 925 // We should only get a single filesystem attachment, because it is the 926 // only combination where both machine and filesystem are already 927 // provisioned, and the attachmenti s not. 928 // We should get two filesystem attachments: 929 // - filesystem-1 to machine-1, because the filesystem and 930 // machine are provisioned, but the attachment is not. 931 // - filesystem-1 to machine-0, because the filesystem, 932 // machine, and attachment are provisioned, but in a 933 // previous session, so a reattachment is requested. 934 expectedFilesystemAttachments := []params.FilesystemAttachment{{ 935 FilesystemTag: "filesystem-1", 936 MachineTag: "machine-1", 937 Info: params.FilesystemAttachmentInfo{ 938 MountPoint: "/srv/fs-123", 939 }, 940 }, { 941 FilesystemTag: "filesystem-1", 942 MachineTag: "machine-0", 943 Info: params.FilesystemAttachmentInfo{ 944 MountPoint: "/srv/fs-123", 945 }, 946 }} 947 948 var allFilesystemAttachments []params.FilesystemAttachment 949 filesystemAttachmentInfoSet := make(chan interface{}) 950 filesystemAccessor := newMockFilesystemAccessor() 951 filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) { 952 allFilesystemAttachments = append(allFilesystemAttachments, filesystemAttachments...) 953 filesystemAttachmentInfoSet <- nil 954 return make([]params.ErrorResult, len(filesystemAttachments)), nil 955 } 956 957 // filesystem-1 and machine-1 are provisioned. 958 filesystemAccessor.provisionedFilesystems["filesystem-1"] = params.Filesystem{ 959 FilesystemTag: "filesystem-1", 960 Info: params.FilesystemInfo{ 961 FilesystemId: "fs-123", 962 }, 963 } 964 filesystemAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0") 965 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 966 967 // machine-0/filesystem-1 attachment is already created. 968 // We should see a reattachment. 969 alreadyAttached := params.MachineStorageId{ 970 MachineTag: "machine-0", 971 AttachmentTag: "filesystem-1", 972 } 973 filesystemAccessor.provisionedAttachments[alreadyAttached] = params.FilesystemAttachment{ 974 MachineTag: "machine-0", 975 FilesystemTag: "filesystem-1", 976 } 977 978 args := &workerArgs{filesystems: filesystemAccessor, registry: s.registry} 979 worker := newStorageProvisioner(c, args) 980 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 981 defer worker.Kill() 982 983 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 984 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 985 }, { 986 MachineTag: "machine-1", AttachmentTag: "filesystem-2", 987 }, { 988 MachineTag: "machine-2", AttachmentTag: "filesystem-1", 989 }, { 990 MachineTag: "machine-0", AttachmentTag: "filesystem-1", 991 }} 992 assertNoEvent(c, filesystemAttachmentInfoSet, "filesystem attachment info set") 993 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 994 waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set") 995 c.Assert(allFilesystemAttachments, jc.SameContents, expectedFilesystemAttachments) 996 997 // Reattachment should only happen once per session. 998 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 999 MachineTag: "machine-0", 1000 AttachmentTag: "filesystem-1", 1001 }} 1002 assertNoEvent(c, filesystemAttachmentInfoSet, "filesystem attachment info set") 1003 } 1004 1005 func (s *storageProvisionerSuite) TestCreateVolumeBackedFilesystem(c *gc.C) { 1006 filesystemInfoSet := make(chan interface{}) 1007 filesystemAccessor := newMockFilesystemAccessor() 1008 filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { 1009 filesystemInfoSet <- filesystems 1010 return nil, nil 1011 } 1012 1013 args := &workerArgs{ 1014 scope: names.NewMachineTag("0"), 1015 filesystems: filesystemAccessor, 1016 registry: s.registry, 1017 } 1018 worker := newStorageProvisioner(c, args) 1019 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1020 defer worker.Kill() 1021 1022 args.volumes.blockDevices[params.MachineStorageId{ 1023 MachineTag: "machine-0", 1024 AttachmentTag: "volume-0-0", 1025 }] = storage.BlockDevice{ 1026 DeviceName: "xvdf1", 1027 Size: 123, 1028 } 1029 filesystemAccessor.filesystemsWatcher.changes <- []string{"0/0", "0/1"} 1030 1031 // Only the block device for volume 0/0 is attached at the moment, 1032 // so only the corresponding filesystem will be created. 1033 filesystemInfo := waitChannel( 1034 c, filesystemInfoSet, 1035 "waiting for filesystem info to be set", 1036 ).([]params.Filesystem) 1037 c.Assert(filesystemInfo, jc.DeepEquals, []params.Filesystem{{ 1038 FilesystemTag: "filesystem-0-0", 1039 Info: params.FilesystemInfo{ 1040 FilesystemId: "xvdf1", 1041 Size: 123, 1042 }, 1043 }}) 1044 1045 // If we now attach the block device for volume 0/1 and trigger the 1046 // notification, then the storage provisioner will wake up and create 1047 // the filesystem. 1048 args.volumes.blockDevices[params.MachineStorageId{ 1049 MachineTag: "machine-0", 1050 AttachmentTag: "volume-0-1", 1051 }] = storage.BlockDevice{ 1052 DeviceName: "xvdf2", 1053 Size: 246, 1054 } 1055 args.volumes.blockDevicesWatcher.changes <- struct{}{} 1056 filesystemInfo = waitChannel( 1057 c, filesystemInfoSet, 1058 "waiting for filesystem info to be set", 1059 ).([]params.Filesystem) 1060 c.Assert(filesystemInfo, jc.DeepEquals, []params.Filesystem{{ 1061 FilesystemTag: "filesystem-0-1", 1062 Info: params.FilesystemInfo{ 1063 FilesystemId: "xvdf2", 1064 Size: 246, 1065 }, 1066 }}) 1067 } 1068 1069 func (s *storageProvisionerSuite) TestAttachVolumeBackedFilesystem(c *gc.C) { 1070 infoSet := make(chan interface{}) 1071 filesystemAccessor := newMockFilesystemAccessor() 1072 filesystemAccessor.setFilesystemAttachmentInfo = func(attachments []params.FilesystemAttachment) ([]params.ErrorResult, error) { 1073 infoSet <- attachments 1074 return nil, nil 1075 } 1076 1077 args := &workerArgs{ 1078 scope: names.NewMachineTag("0"), 1079 filesystems: filesystemAccessor, 1080 registry: s.registry, 1081 } 1082 worker := newStorageProvisioner(c, args) 1083 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1084 defer worker.Kill() 1085 1086 filesystemAccessor.provisionedFilesystems["filesystem-0-0"] = params.Filesystem{ 1087 FilesystemTag: "filesystem-0-0", 1088 VolumeTag: "volume-0-0", 1089 Info: params.FilesystemInfo{ 1090 FilesystemId: "whatever", 1091 Size: 123, 1092 }, 1093 } 1094 filesystemAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0") 1095 1096 args.volumes.blockDevices[params.MachineStorageId{ 1097 MachineTag: "machine-0", 1098 AttachmentTag: "volume-0-0", 1099 }] = storage.BlockDevice{ 1100 DeviceName: "xvdf1", 1101 Size: 123, 1102 } 1103 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1104 MachineTag: "machine-0", 1105 AttachmentTag: "filesystem-0-0", 1106 }} 1107 filesystemAccessor.filesystemsWatcher.changes <- []string{"0/0"} 1108 1109 info := waitChannel( 1110 c, infoSet, "waiting for filesystem attachment info to be set", 1111 ).([]params.FilesystemAttachment) 1112 c.Assert(info, jc.DeepEquals, []params.FilesystemAttachment{{ 1113 FilesystemTag: "filesystem-0-0", 1114 MachineTag: "machine-0", 1115 Info: params.FilesystemAttachmentInfo{ 1116 MountPoint: "/mnt/xvdf1", 1117 ReadOnly: true, 1118 }, 1119 }}) 1120 } 1121 1122 func (s *storageProvisionerSuite) TestResourceTags(c *gc.C) { 1123 volumeInfoSet := make(chan interface{}) 1124 volumeAccessor := newMockVolumeAccessor() 1125 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 1126 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 1127 defer close(volumeInfoSet) 1128 return nil, nil 1129 } 1130 1131 filesystemInfoSet := make(chan interface{}) 1132 filesystemAccessor := newMockFilesystemAccessor() 1133 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 1134 filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { 1135 defer close(filesystemInfoSet) 1136 return nil, nil 1137 } 1138 1139 var volumeSource dummyVolumeSource 1140 s.provider.volumeSourceFunc = func(sourceConfig *storage.Config) (storage.VolumeSource, error) { 1141 return &volumeSource, nil 1142 } 1143 1144 var filesystemSource dummyFilesystemSource 1145 s.provider.filesystemSourceFunc = func(sourceConfig *storage.Config) (storage.FilesystemSource, error) { 1146 return &filesystemSource, nil 1147 } 1148 1149 args := &workerArgs{ 1150 volumes: volumeAccessor, 1151 filesystems: filesystemAccessor, 1152 registry: s.registry, 1153 } 1154 worker := newStorageProvisioner(c, args) 1155 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1156 defer worker.Kill() 1157 1158 volumeAccessor.volumesWatcher.changes <- []string{"1"} 1159 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 1160 waitChannel(c, volumeInfoSet, "waiting for volume info to be set") 1161 waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") 1162 c.Assert(volumeSource.createVolumesArgs, jc.DeepEquals, [][]storage.VolumeParams{{{ 1163 Tag: names.NewVolumeTag("1"), 1164 Size: 1024, 1165 Provider: "dummy", 1166 Attributes: map[string]interface{}{"persistent": true}, 1167 ResourceTags: map[string]string{"very": "fancy"}, 1168 Attachment: &storage.VolumeAttachmentParams{ 1169 Volume: names.NewVolumeTag("1"), 1170 AttachmentParams: storage.AttachmentParams{ 1171 Machine: names.NewMachineTag("1"), 1172 Provider: "dummy", 1173 InstanceId: "already-provisioned-1", 1174 ReadOnly: true, 1175 }, 1176 }, 1177 }}}) 1178 c.Assert(filesystemSource.createFilesystemsArgs, jc.DeepEquals, [][]storage.FilesystemParams{{{ 1179 Tag: names.NewFilesystemTag("1"), 1180 Size: 1024, 1181 Provider: "dummy", 1182 ResourceTags: map[string]string{"very": "fancy"}, 1183 }}}) 1184 } 1185 1186 func (s *storageProvisionerSuite) TestSetVolumeInfoErrorStopsWorker(c *gc.C) { 1187 volumeAccessor := newMockVolumeAccessor() 1188 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 1189 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 1190 return nil, errors.New("belly up") 1191 } 1192 1193 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 1194 worker := newStorageProvisioner(c, args) 1195 defer worker.Wait() 1196 defer worker.Kill() 1197 1198 done := make(chan interface{}) 1199 go func() { 1200 defer close(done) 1201 err := worker.Wait() 1202 c.Assert(err, gc.ErrorMatches, "creating volumes: publishing volumes to state: belly up") 1203 }() 1204 1205 args.volumes.volumesWatcher.changes <- []string{"1"} 1206 waitChannel(c, done, "waiting for worker to exit") 1207 } 1208 1209 func (s *storageProvisionerSuite) TestSetVolumeInfoErrorResultDoesNotStopWorker(c *gc.C) { 1210 volumeAccessor := newMockVolumeAccessor() 1211 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 1212 volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) { 1213 return []params.ErrorResult{{Error: ¶ms.Error{Message: "message", Code: "code"}}}, nil 1214 } 1215 1216 args := &workerArgs{volumes: volumeAccessor, registry: s.registry} 1217 worker := newStorageProvisioner(c, args) 1218 defer func() { 1219 err := worker.Wait() 1220 c.Assert(err, jc.ErrorIsNil) 1221 }() 1222 defer worker.Kill() 1223 1224 done := make(chan interface{}) 1225 go func() { 1226 defer close(done) 1227 worker.Wait() 1228 }() 1229 1230 args.volumes.volumesWatcher.changes <- []string{"1"} 1231 assertNoEvent(c, done, "worker exited") 1232 } 1233 1234 func (s *storageProvisionerSuite) TestDetachVolumesUnattached(c *gc.C) { 1235 removed := make(chan interface{}) 1236 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 1237 defer close(removed) 1238 c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{ 1239 MachineTag: "machine-0", 1240 AttachmentTag: "volume-0", 1241 }}) 1242 return make([]params.ErrorResult, len(ids)), nil 1243 } 1244 1245 args := &workerArgs{ 1246 life: &mockLifecycleManager{removeAttachments: removeAttachments}, 1247 registry: s.registry, 1248 } 1249 worker := newStorageProvisioner(c, args) 1250 defer worker.Wait() 1251 defer worker.Kill() 1252 1253 args.volumes.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1254 MachineTag: "machine-0", AttachmentTag: "volume-0", 1255 }} 1256 waitChannel(c, removed, "waiting for attachment to be removed") 1257 } 1258 1259 func (s *storageProvisionerSuite) TestDetachVolumes(c *gc.C) { 1260 var attached bool 1261 volumeAttachmentInfoSet := make(chan interface{}) 1262 volumeAccessor := newMockVolumeAccessor() 1263 volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) { 1264 close(volumeAttachmentInfoSet) 1265 attached = true 1266 for _, a := range volumeAttachments { 1267 id := params.MachineStorageId{ 1268 MachineTag: a.MachineTag, 1269 AttachmentTag: a.VolumeTag, 1270 } 1271 volumeAccessor.provisionedAttachments[id] = a 1272 } 1273 return make([]params.ErrorResult, len(volumeAttachments)), nil 1274 } 1275 1276 expectedAttachmentIds := []params.MachineStorageId{{ 1277 MachineTag: "machine-1", AttachmentTag: "volume-1", 1278 }} 1279 1280 attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) { 1281 c.Assert(ids, gc.DeepEquals, expectedAttachmentIds) 1282 life := params.Alive 1283 if attached { 1284 life = params.Dying 1285 } 1286 return []params.LifeResult{{Life: life}}, nil 1287 } 1288 1289 detached := make(chan interface{}) 1290 s.provider.detachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]error, error) { 1291 c.Assert(args, gc.HasLen, 1) 1292 c.Assert(args[0].Machine.String(), gc.Equals, expectedAttachmentIds[0].MachineTag) 1293 c.Assert(args[0].Volume.String(), gc.Equals, expectedAttachmentIds[0].AttachmentTag) 1294 defer close(detached) 1295 return make([]error, len(args)), nil 1296 } 1297 1298 removed := make(chan interface{}) 1299 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 1300 c.Assert(ids, gc.DeepEquals, expectedAttachmentIds) 1301 close(removed) 1302 return make([]params.ErrorResult, len(ids)), nil 1303 } 1304 1305 // volume-1 and machine-1 are provisioned. 1306 volumeAccessor.provisionedVolumes["volume-1"] = params.Volume{ 1307 VolumeTag: "volume-1", 1308 Info: params.VolumeInfo{ 1309 VolumeId: "vol-123", 1310 }, 1311 } 1312 volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 1313 1314 args := &workerArgs{ 1315 volumes: volumeAccessor, 1316 life: &mockLifecycleManager{ 1317 attachmentLife: attachmentLife, 1318 removeAttachments: removeAttachments, 1319 }, 1320 registry: s.registry, 1321 } 1322 worker := newStorageProvisioner(c, args) 1323 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1324 defer worker.Kill() 1325 1326 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1327 MachineTag: "machine-1", AttachmentTag: "volume-1", 1328 }} 1329 volumeAccessor.volumesWatcher.changes <- []string{"1"} 1330 waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") 1331 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1332 MachineTag: "machine-1", AttachmentTag: "volume-1", 1333 }} 1334 waitChannel(c, detached, "waiting for volume to be detached") 1335 waitChannel(c, removed, "waiting for attachment to be removed") 1336 } 1337 1338 func (s *storageProvisionerSuite) TestDetachVolumesRetry(c *gc.C) { 1339 machine := names.NewMachineTag("1") 1340 volume := names.NewVolumeTag("1") 1341 attachmentId := params.MachineStorageId{ 1342 MachineTag: machine.String(), 1343 AttachmentTag: volume.String(), 1344 } 1345 volumeAccessor := newMockVolumeAccessor() 1346 volumeAccessor.provisionedAttachments[attachmentId] = params.VolumeAttachment{ 1347 MachineTag: machine.String(), 1348 VolumeTag: volume.String(), 1349 } 1350 volumeAccessor.provisionedVolumes[volume.String()] = params.Volume{ 1351 VolumeTag: volume.String(), 1352 Info: params.VolumeInfo{ 1353 VolumeId: "vol-123", 1354 }, 1355 } 1356 volumeAccessor.provisionedMachines[machine.String()] = instance.Id("already-provisioned-1") 1357 1358 attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) { 1359 return []params.LifeResult{{Life: params.Dying}}, nil 1360 } 1361 1362 // mockFunc's After will progress the current time by the specified 1363 // duration and signal the channel immediately. 1364 clock := &mockClock{} 1365 var detachVolumeTimes []time.Time 1366 1367 s.provider.detachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]error, error) { 1368 detachVolumeTimes = append(detachVolumeTimes, clock.Now()) 1369 if len(detachVolumeTimes) < 10 { 1370 return []error{errors.New("badness")}, nil 1371 } 1372 return []error{nil}, nil 1373 } 1374 1375 removed := make(chan interface{}) 1376 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 1377 close(removed) 1378 return make([]params.ErrorResult, len(ids)), nil 1379 } 1380 1381 args := &workerArgs{ 1382 volumes: volumeAccessor, 1383 clock: clock, 1384 life: &mockLifecycleManager{ 1385 attachmentLife: attachmentLife, 1386 removeAttachments: removeAttachments, 1387 }, 1388 registry: s.registry, 1389 } 1390 worker := newStorageProvisioner(c, args) 1391 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1392 defer worker.Kill() 1393 1394 volumeAccessor.volumesWatcher.changes <- []string{volume.Id()} 1395 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1396 MachineTag: machine.String(), 1397 AttachmentTag: volume.String(), 1398 }} 1399 waitChannel(c, removed, "waiting for attachment to be removed") 1400 c.Assert(detachVolumeTimes, gc.HasLen, 10) 1401 1402 // The first attempt should have been immediate: T0. 1403 c.Assert(detachVolumeTimes[0], gc.Equals, time.Time{}) 1404 1405 delays := make([]time.Duration, len(detachVolumeTimes)-1) 1406 for i := range detachVolumeTimes[1:] { 1407 delays[i] = detachVolumeTimes[i+1].Sub(detachVolumeTimes[i]) 1408 } 1409 c.Assert(delays, jc.DeepEquals, []time.Duration{ 1410 30 * time.Second, 1411 1 * time.Minute, 1412 2 * time.Minute, 1413 4 * time.Minute, 1414 8 * time.Minute, 1415 16 * time.Minute, 1416 30 * time.Minute, // ceiling reached 1417 30 * time.Minute, 1418 30 * time.Minute, 1419 }) 1420 1421 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 1422 {Tag: "volume-1", Status: "detaching", Info: "badness"}, // DetachVolumes 1423 {Tag: "volume-1", Status: "detaching", Info: "badness"}, 1424 {Tag: "volume-1", Status: "detaching", Info: "badness"}, 1425 {Tag: "volume-1", Status: "detaching", Info: "badness"}, 1426 {Tag: "volume-1", Status: "detaching", Info: "badness"}, 1427 {Tag: "volume-1", Status: "detaching", Info: "badness"}, 1428 {Tag: "volume-1", Status: "detaching", Info: "badness"}, 1429 {Tag: "volume-1", Status: "detaching", Info: "badness"}, 1430 {Tag: "volume-1", Status: "detaching", Info: "badness"}, 1431 {Tag: "volume-1", Status: "detached", Info: ""}, 1432 }) 1433 } 1434 1435 func (s *storageProvisionerSuite) TestDetachFilesystemsUnattached(c *gc.C) { 1436 removed := make(chan interface{}) 1437 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 1438 defer close(removed) 1439 c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{ 1440 MachineTag: "machine-0", 1441 AttachmentTag: "filesystem-0", 1442 }}) 1443 return make([]params.ErrorResult, len(ids)), nil 1444 } 1445 1446 args := &workerArgs{ 1447 life: &mockLifecycleManager{removeAttachments: removeAttachments}, 1448 registry: s.registry, 1449 } 1450 worker := newStorageProvisioner(c, args) 1451 defer worker.Wait() 1452 defer worker.Kill() 1453 1454 args.filesystems.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1455 MachineTag: "machine-0", AttachmentTag: "filesystem-0", 1456 }} 1457 waitChannel(c, removed, "waiting for attachment to be removed") 1458 } 1459 1460 func (s *storageProvisionerSuite) TestDetachFilesystems(c *gc.C) { 1461 var attached bool 1462 filesystemAttachmentInfoSet := make(chan interface{}) 1463 filesystemAccessor := newMockFilesystemAccessor() 1464 filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) { 1465 close(filesystemAttachmentInfoSet) 1466 attached = true 1467 for _, a := range filesystemAttachments { 1468 id := params.MachineStorageId{ 1469 MachineTag: a.MachineTag, 1470 AttachmentTag: a.FilesystemTag, 1471 } 1472 filesystemAccessor.provisionedAttachments[id] = a 1473 } 1474 return make([]params.ErrorResult, len(filesystemAttachments)), nil 1475 } 1476 1477 expectedAttachmentIds := []params.MachineStorageId{{ 1478 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 1479 }} 1480 1481 attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) { 1482 c.Assert(ids, gc.DeepEquals, expectedAttachmentIds) 1483 life := params.Alive 1484 if attached { 1485 life = params.Dying 1486 } 1487 return []params.LifeResult{{Life: life}}, nil 1488 } 1489 1490 detached := make(chan interface{}) 1491 s.provider.detachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) ([]error, error) { 1492 c.Assert(args, gc.HasLen, 1) 1493 c.Assert(args[0].Machine.String(), gc.Equals, expectedAttachmentIds[0].MachineTag) 1494 c.Assert(args[0].Filesystem.String(), gc.Equals, expectedAttachmentIds[0].AttachmentTag) 1495 defer close(detached) 1496 return make([]error, len(args)), nil 1497 } 1498 1499 removed := make(chan interface{}) 1500 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 1501 c.Assert(ids, gc.DeepEquals, expectedAttachmentIds) 1502 close(removed) 1503 return make([]params.ErrorResult, len(ids)), nil 1504 } 1505 1506 // filesystem-1 and machine-1 are provisioned. 1507 filesystemAccessor.provisionedFilesystems["filesystem-1"] = params.Filesystem{ 1508 FilesystemTag: "filesystem-1", 1509 Info: params.FilesystemInfo{ 1510 FilesystemId: "fs-id", 1511 }, 1512 } 1513 filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") 1514 1515 args := &workerArgs{ 1516 filesystems: filesystemAccessor, 1517 life: &mockLifecycleManager{ 1518 attachmentLife: attachmentLife, 1519 removeAttachments: removeAttachments, 1520 }, 1521 registry: s.registry, 1522 } 1523 worker := newStorageProvisioner(c, args) 1524 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1525 defer worker.Kill() 1526 1527 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1528 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 1529 }} 1530 filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} 1531 waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set") 1532 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1533 MachineTag: "machine-1", AttachmentTag: "filesystem-1", 1534 }} 1535 waitChannel(c, detached, "waiting for filesystem to be detached") 1536 waitChannel(c, removed, "waiting for attachment to be removed") 1537 } 1538 1539 func (s *storageProvisionerSuite) TestDestroyVolumes(c *gc.C) { 1540 unprovisionedVolume := names.NewVolumeTag("0") 1541 provisionedDestroyVolume := names.NewVolumeTag("1") 1542 provisionedReleaseVolume := names.NewVolumeTag("2") 1543 1544 volumeAccessor := newMockVolumeAccessor() 1545 volumeAccessor.provisionVolume(provisionedDestroyVolume) 1546 volumeAccessor.provisionVolume(provisionedReleaseVolume) 1547 1548 life := func(tags []names.Tag) ([]params.LifeResult, error) { 1549 results := make([]params.LifeResult, len(tags)) 1550 for i := range results { 1551 results[i].Life = params.Dead 1552 } 1553 return results, nil 1554 } 1555 1556 destroyedChan := make(chan interface{}, 1) 1557 s.provider.destroyVolumesFunc = func(volumeIds []string) ([]error, error) { 1558 destroyedChan <- volumeIds 1559 return make([]error, len(volumeIds)), nil 1560 } 1561 1562 releasedChan := make(chan interface{}, 1) 1563 s.provider.releaseVolumesFunc = func(volumeIds []string) ([]error, error) { 1564 releasedChan <- volumeIds 1565 return make([]error, len(volumeIds)), nil 1566 } 1567 1568 removedChan := make(chan interface{}, 1) 1569 remove := func(tags []names.Tag) ([]params.ErrorResult, error) { 1570 removedChan <- tags 1571 return make([]params.ErrorResult, len(tags)), nil 1572 } 1573 1574 args := &workerArgs{ 1575 volumes: volumeAccessor, 1576 life: &mockLifecycleManager{ 1577 life: life, 1578 remove: remove, 1579 }, 1580 registry: s.registry, 1581 } 1582 worker := newStorageProvisioner(c, args) 1583 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1584 defer worker.Kill() 1585 1586 volumeAccessor.volumesWatcher.changes <- []string{ 1587 unprovisionedVolume.Id(), 1588 provisionedDestroyVolume.Id(), 1589 provisionedReleaseVolume.Id(), 1590 } 1591 1592 // All volumes should be removed; the provisioned ones 1593 // should be destroyed/released first. 1594 1595 destroyed := waitChannel(c, destroyedChan, "waiting for volume to be destroyed") 1596 assertNoEvent(c, destroyedChan, "volumes destroyed") 1597 c.Assert(destroyed, jc.DeepEquals, []string{"vol-1"}) 1598 1599 released := waitChannel(c, releasedChan, "waiting for volume to be released") 1600 assertNoEvent(c, releasedChan, "volumes released") 1601 c.Assert(released, jc.DeepEquals, []string{"vol-2"}) 1602 1603 var removed []names.Tag 1604 for len(removed) < 3 { 1605 tags := waitChannel(c, removedChan, "waiting for volumes to be removed").([]names.Tag) 1606 removed = append(removed, tags...) 1607 } 1608 c.Assert(removed, jc.SameContents, []names.Tag{ 1609 unprovisionedVolume, 1610 provisionedDestroyVolume, 1611 provisionedReleaseVolume, 1612 }) 1613 assertNoEvent(c, removedChan, "volumes removed") 1614 } 1615 1616 func (s *storageProvisionerSuite) TestDestroyVolumesRetry(c *gc.C) { 1617 volume := names.NewVolumeTag("1") 1618 volumeAccessor := newMockVolumeAccessor() 1619 volumeAccessor.provisionVolume(volume) 1620 1621 life := func(tags []names.Tag) ([]params.LifeResult, error) { 1622 return []params.LifeResult{{Life: params.Dead}}, nil 1623 } 1624 1625 // mockFunc's After will progress the current time by the specified 1626 // duration and signal the channel immediately. 1627 clock := &mockClock{} 1628 var destroyVolumeTimes []time.Time 1629 1630 s.provider.destroyVolumesFunc = func(volumeIds []string) ([]error, error) { 1631 destroyVolumeTimes = append(destroyVolumeTimes, clock.Now()) 1632 if len(destroyVolumeTimes) < 10 { 1633 return []error{errors.New("badness")}, nil 1634 } 1635 return []error{nil}, nil 1636 } 1637 1638 removedChan := make(chan interface{}, 1) 1639 remove := func(tags []names.Tag) ([]params.ErrorResult, error) { 1640 removedChan <- tags 1641 return make([]params.ErrorResult, len(tags)), nil 1642 } 1643 1644 args := &workerArgs{ 1645 volumes: volumeAccessor, 1646 clock: clock, 1647 life: &mockLifecycleManager{ 1648 life: life, 1649 remove: remove, 1650 }, 1651 registry: s.registry, 1652 } 1653 worker := newStorageProvisioner(c, args) 1654 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1655 defer worker.Kill() 1656 1657 volumeAccessor.volumesWatcher.changes <- []string{volume.Id()} 1658 waitChannel(c, removedChan, "waiting for volume to be removed") 1659 c.Assert(destroyVolumeTimes, gc.HasLen, 10) 1660 1661 // The first attempt should have been immediate: T0. 1662 c.Assert(destroyVolumeTimes[0], gc.Equals, time.Time{}) 1663 1664 delays := make([]time.Duration, len(destroyVolumeTimes)-1) 1665 for i := range destroyVolumeTimes[1:] { 1666 delays[i] = destroyVolumeTimes[i+1].Sub(destroyVolumeTimes[i]) 1667 } 1668 c.Assert(delays, jc.DeepEquals, []time.Duration{ 1669 30 * time.Second, 1670 1 * time.Minute, 1671 2 * time.Minute, 1672 4 * time.Minute, 1673 8 * time.Minute, 1674 16 * time.Minute, 1675 30 * time.Minute, // ceiling reached 1676 30 * time.Minute, 1677 30 * time.Minute, 1678 }) 1679 1680 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 1681 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1682 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1683 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1684 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1685 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1686 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1687 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1688 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1689 {Tag: "volume-1", Status: "error", Info: "destroying volume: badness"}, 1690 }) 1691 } 1692 1693 func (s *storageProvisionerSuite) TestDestroyFilesystems(c *gc.C) { 1694 unprovisionedFilesystem := names.NewFilesystemTag("0") 1695 provisionedDestroyFilesystem := names.NewFilesystemTag("1") 1696 provisionedReleaseFilesystem := names.NewFilesystemTag("2") 1697 1698 filesystemAccessor := newMockFilesystemAccessor() 1699 filesystemAccessor.provisionFilesystem(provisionedDestroyFilesystem) 1700 filesystemAccessor.provisionFilesystem(provisionedReleaseFilesystem) 1701 1702 life := func(tags []names.Tag) ([]params.LifeResult, error) { 1703 results := make([]params.LifeResult, len(tags)) 1704 for i := range results { 1705 results[i].Life = params.Dead 1706 } 1707 return results, nil 1708 } 1709 1710 destroyedChan := make(chan interface{}, 1) 1711 s.provider.destroyFilesystemsFunc = func(filesystemIds []string) ([]error, error) { 1712 destroyedChan <- filesystemIds 1713 return make([]error, len(filesystemIds)), nil 1714 } 1715 1716 releasedChan := make(chan interface{}, 1) 1717 s.provider.releaseFilesystemsFunc = func(filesystemIds []string) ([]error, error) { 1718 releasedChan <- filesystemIds 1719 return make([]error, len(filesystemIds)), nil 1720 } 1721 1722 removedChan := make(chan interface{}, 1) 1723 remove := func(tags []names.Tag) ([]params.ErrorResult, error) { 1724 removedChan <- tags 1725 return make([]params.ErrorResult, len(tags)), nil 1726 } 1727 1728 args := &workerArgs{ 1729 filesystems: filesystemAccessor, 1730 life: &mockLifecycleManager{ 1731 life: life, 1732 remove: remove, 1733 }, 1734 registry: s.registry, 1735 } 1736 worker := newStorageProvisioner(c, args) 1737 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1738 defer worker.Kill() 1739 1740 filesystemAccessor.filesystemsWatcher.changes <- []string{ 1741 unprovisionedFilesystem.Id(), 1742 provisionedDestroyFilesystem.Id(), 1743 provisionedReleaseFilesystem.Id(), 1744 } 1745 1746 // Both filesystems should be removed; the provisioned ones 1747 // should be destroyed/released first. 1748 1749 destroyed := waitChannel(c, destroyedChan, "waiting for filesystem to be destroyed") 1750 assertNoEvent(c, destroyedChan, "filesystems destroyed") 1751 c.Assert(destroyed, jc.DeepEquals, []string{"fs-1"}) 1752 1753 released := waitChannel(c, releasedChan, "waiting for filesystem to be released") 1754 assertNoEvent(c, releasedChan, "filesystems released") 1755 c.Assert(released, jc.DeepEquals, []string{"fs-2"}) 1756 1757 var removed []names.Tag 1758 for len(removed) < 3 { 1759 tags := waitChannel(c, removedChan, "waiting for filesystems to be removed").([]names.Tag) 1760 removed = append(removed, tags...) 1761 } 1762 c.Assert(removed, jc.SameContents, []names.Tag{ 1763 unprovisionedFilesystem, 1764 provisionedDestroyFilesystem, 1765 provisionedReleaseFilesystem, 1766 }) 1767 assertNoEvent(c, removedChan, "filesystems removed") 1768 } 1769 1770 func (s *storageProvisionerSuite) TestDestroyFilesystemsRetry(c *gc.C) { 1771 provisionedDestroyFilesystem := names.NewFilesystemTag("0") 1772 1773 filesystemAccessor := newMockFilesystemAccessor() 1774 filesystemAccessor.provisionFilesystem(provisionedDestroyFilesystem) 1775 1776 life := func(tags []names.Tag) ([]params.LifeResult, error) { 1777 return []params.LifeResult{{Life: params.Dead}}, nil 1778 } 1779 1780 // mockFunc's After will progress the current time by the specified 1781 // duration and signal the channel immediately. 1782 clock := &mockClock{} 1783 var destroyFilesystemTimes []time.Time 1784 s.provider.destroyFilesystemsFunc = func(filesystemIds []string) ([]error, error) { 1785 destroyFilesystemTimes = append(destroyFilesystemTimes, clock.Now()) 1786 if len(destroyFilesystemTimes) < 10 { 1787 return []error{errors.New("destroyFilesystems failed, please retry later")}, nil 1788 } 1789 return []error{nil}, nil 1790 } 1791 1792 removedChan := make(chan interface{}, 1) 1793 remove := func(tags []names.Tag) ([]params.ErrorResult, error) { 1794 removedChan <- tags 1795 return make([]params.ErrorResult, len(tags)), nil 1796 } 1797 1798 args := &workerArgs{ 1799 filesystems: filesystemAccessor, 1800 clock: clock, 1801 life: &mockLifecycleManager{ 1802 life: life, 1803 remove: remove, 1804 }, 1805 registry: s.registry, 1806 } 1807 worker := newStorageProvisioner(c, args) 1808 defer func() { c.Assert(worker.Wait(), gc.IsNil) }() 1809 defer worker.Kill() 1810 1811 filesystemAccessor.filesystemsWatcher.changes <- []string{ 1812 provisionedDestroyFilesystem.Id(), 1813 } 1814 1815 waitChannel(c, removedChan, "waiting for filesystem to be removed") 1816 c.Assert(destroyFilesystemTimes, gc.HasLen, 10) 1817 1818 // The first attempt should have been immediate: T0. 1819 c.Assert(destroyFilesystemTimes[0], gc.Equals, time.Time{}) 1820 1821 delays := make([]time.Duration, len(destroyFilesystemTimes)-1) 1822 for i := range destroyFilesystemTimes[1:] { 1823 delays[i] = destroyFilesystemTimes[i+1].Sub(destroyFilesystemTimes[i]) 1824 } 1825 c.Assert(delays, jc.DeepEquals, []time.Duration{ 1826 30 * time.Second, 1827 1 * time.Minute, 1828 2 * time.Minute, 1829 4 * time.Minute, 1830 8 * time.Minute, 1831 16 * time.Minute, 1832 30 * time.Minute, // ceiling reached 1833 30 * time.Minute, 1834 30 * time.Minute, 1835 }) 1836 1837 c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ 1838 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1839 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1840 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1841 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1842 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1843 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1844 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1845 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1846 {Tag: "filesystem-0", Status: "error", Info: "removing filesystem: destroyFilesystems failed, please retry later"}, 1847 }) 1848 } 1849 1850 type caasStorageProvisionerSuite struct { 1851 coretesting.BaseSuite 1852 provider *dummyProvider 1853 registry storage.ProviderRegistry 1854 } 1855 1856 var _ = gc.Suite(&caasStorageProvisionerSuite{}) 1857 1858 func (s *caasStorageProvisionerSuite) SetUpTest(c *gc.C) { 1859 s.BaseSuite.SetUpTest(c) 1860 s.provider = &dummyProvider{dynamic: true} 1861 s.registry = storage.StaticProviderRegistry{ 1862 map[storage.ProviderType]storage.Provider{ 1863 "dummy": s.provider, 1864 }, 1865 } 1866 } 1867 1868 func (s *caasStorageProvisionerSuite) TestDetachVolumesUnattached(c *gc.C) { 1869 removed := make(chan interface{}) 1870 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 1871 defer close(removed) 1872 c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{ 1873 MachineTag: "unit-mariadb-0", 1874 AttachmentTag: "volume-0", 1875 }}) 1876 return make([]params.ErrorResult, len(ids)), nil 1877 } 1878 1879 args := &workerArgs{ 1880 life: &mockLifecycleManager{removeAttachments: removeAttachments}, 1881 registry: s.registry, 1882 } 1883 w := newStorageProvisioner(c, args) 1884 defer w.Wait() 1885 defer w.Kill() 1886 1887 args.volumes.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1888 MachineTag: "unit-mariadb-0", AttachmentTag: "volume-0", 1889 }} 1890 waitChannel(c, removed, "waiting for attachment to be removed") 1891 } 1892 1893 func (s *caasStorageProvisionerSuite) TestDetachVolumes(c *gc.C) { 1894 volumeAccessor := newMockVolumeAccessor() 1895 1896 expectedAttachmentIds := []params.MachineStorageId{{ 1897 MachineTag: "unit-mariadb-1", AttachmentTag: "volume-1", 1898 }} 1899 1900 attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) { 1901 return []params.LifeResult{{Life: params.Dying}}, nil 1902 } 1903 1904 detached := make(chan interface{}) 1905 s.provider.detachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]error, error) { 1906 c.Assert(args, gc.HasLen, 1) 1907 c.Assert(args[0].Machine.String(), gc.Equals, expectedAttachmentIds[0].MachineTag) 1908 c.Assert(args[0].Volume.String(), gc.Equals, expectedAttachmentIds[0].AttachmentTag) 1909 defer close(detached) 1910 return make([]error, len(args)), nil 1911 } 1912 1913 args := &workerArgs{ 1914 volumes: volumeAccessor, 1915 life: &mockLifecycleManager{ 1916 attachmentLife: attachmentLife, 1917 }, 1918 registry: s.registry, 1919 } 1920 w := newStorageProvisioner(c, args) 1921 defer func() { c.Assert(w.Wait(), gc.IsNil) }() 1922 defer w.Kill() 1923 1924 volumeAccessor.provisionedAttachments[expectedAttachmentIds[0]] = params.VolumeAttachment{ 1925 MachineTag: "unit-mariadb-1", 1926 VolumeTag: "volume-1", 1927 } 1928 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1929 MachineTag: "unit-mariadb-1", AttachmentTag: "volume-1", 1930 }} 1931 waitChannel(c, detached, "waiting for volume to be detached") 1932 } 1933 1934 func (s *caasStorageProvisionerSuite) TestRemoveVolumes(c *gc.C) { 1935 volumeAccessor := newMockVolumeAccessor() 1936 1937 expectedAttachmentIds := []params.MachineStorageId{{ 1938 MachineTag: "unit-mariadb-1", AttachmentTag: "volume-1", 1939 }} 1940 1941 attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) { 1942 return []params.LifeResult{{Life: params.Dying}}, nil 1943 } 1944 1945 removed := make(chan interface{}) 1946 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 1947 c.Assert(ids, gc.DeepEquals, expectedAttachmentIds) 1948 close(removed) 1949 return make([]params.ErrorResult, len(ids)), nil 1950 } 1951 1952 args := &workerArgs{ 1953 volumes: volumeAccessor, 1954 life: &mockLifecycleManager{ 1955 attachmentLife: attachmentLife, 1956 removeAttachments: removeAttachments, 1957 }, 1958 registry: s.registry, 1959 } 1960 w := newStorageProvisioner(c, args) 1961 defer func() { c.Assert(w.Wait(), gc.IsNil) }() 1962 defer w.Kill() 1963 1964 volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1965 MachineTag: "unit-mariadb-1", AttachmentTag: "volume-1", 1966 }} 1967 waitChannel(c, removed, "waiting for attachment to be removed") 1968 } 1969 1970 func (s *caasStorageProvisionerSuite) TestDetachFilesystems(c *gc.C) { 1971 removed := make(chan interface{}) 1972 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 1973 defer close(removed) 1974 c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{ 1975 MachineTag: "unit-mariadb-0", 1976 AttachmentTag: "filesystem-0", 1977 }}) 1978 return make([]params.ErrorResult, len(ids)), nil 1979 } 1980 1981 args := &workerArgs{ 1982 life: &mockLifecycleManager{removeAttachments: removeAttachments}, 1983 registry: s.registry, 1984 } 1985 w := newStorageProvisioner(c, args) 1986 defer w.Wait() 1987 defer w.Kill() 1988 1989 args.filesystems.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 1990 MachineTag: "unit-mariadb-0", AttachmentTag: "filesystem-0", 1991 }} 1992 waitChannel(c, removed, "waiting for attachment to be removed") 1993 } 1994 1995 func (s *caasStorageProvisionerSuite) TestRemoveFilesystems(c *gc.C) { 1996 filesystemAccessor := newMockFilesystemAccessor() 1997 1998 expectedAttachmentIds := []params.MachineStorageId{{ 1999 MachineTag: "unit-mariadb-1", AttachmentTag: "filesystem-1", 2000 }} 2001 2002 attachmentLife := func(ids []params.MachineStorageId) ([]params.LifeResult, error) { 2003 c.Assert(ids, gc.DeepEquals, expectedAttachmentIds) 2004 return []params.LifeResult{{Life: params.Dying}}, nil 2005 } 2006 2007 removed := make(chan interface{}) 2008 removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) { 2009 c.Assert(ids, gc.DeepEquals, expectedAttachmentIds) 2010 close(removed) 2011 return make([]params.ErrorResult, len(ids)), nil 2012 } 2013 2014 args := &workerArgs{ 2015 filesystems: filesystemAccessor, 2016 life: &mockLifecycleManager{ 2017 attachmentLife: attachmentLife, 2018 removeAttachments: removeAttachments, 2019 }, 2020 registry: s.registry, 2021 } 2022 w := newStorageProvisioner(c, args) 2023 defer func() { c.Assert(w.Wait(), gc.IsNil) }() 2024 defer w.Kill() 2025 2026 filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ 2027 MachineTag: "unit-mariadb-1", AttachmentTag: "filesystem-1", 2028 }} 2029 waitChannel(c, removed, "waiting for filesystem to be removed") 2030 } 2031 2032 func newStorageProvisioner(c *gc.C, args *workerArgs) worker.Worker { 2033 if args == nil { 2034 args = &workerArgs{} 2035 } 2036 var storageDir string 2037 switch args.scope.(type) { 2038 case names.MachineTag: 2039 storageDir = "storage-dir" 2040 case names.ModelTag: 2041 case nil: 2042 args.scope = coretesting.ModelTag 2043 } 2044 if args.volumes == nil { 2045 args.volumes = newMockVolumeAccessor() 2046 } 2047 if args.filesystems == nil { 2048 args.filesystems = newMockFilesystemAccessor() 2049 } 2050 if args.life == nil { 2051 args.life = &mockLifecycleManager{} 2052 } 2053 if args.machines == nil { 2054 args.machines = newMockMachineAccessor(c) 2055 } 2056 if args.clock == nil { 2057 args.clock = &mockClock{} 2058 } 2059 if args.statusSetter == nil { 2060 args.statusSetter = &mockStatusSetter{} 2061 } 2062 worker, err := storageprovisioner.NewStorageProvisioner(storageprovisioner.Config{ 2063 Scope: args.scope, 2064 StorageDir: storageDir, 2065 Volumes: args.volumes, 2066 Filesystems: args.filesystems, 2067 Life: args.life, 2068 Registry: args.registry, 2069 Machines: args.machines, 2070 Status: args.statusSetter, 2071 Clock: args.clock, 2072 CloudCallContext: context.NewCloudCallContext(), 2073 }) 2074 c.Assert(err, jc.ErrorIsNil) 2075 return worker 2076 } 2077 2078 type workerArgs struct { 2079 scope names.Tag 2080 volumes *mockVolumeAccessor 2081 filesystems *mockFilesystemAccessor 2082 life *mockLifecycleManager 2083 registry storage.ProviderRegistry 2084 machines *mockMachineAccessor 2085 clock clock.Clock 2086 statusSetter *mockStatusSetter 2087 } 2088 2089 func waitChannel(c *gc.C, ch <-chan interface{}, activity string) interface{} { 2090 select { 2091 case v := <-ch: 2092 return v 2093 case <-time.After(coretesting.LongWait): 2094 c.Fatalf("timed out " + activity) 2095 panic("unreachable") 2096 } 2097 } 2098 2099 func assertNoEvent(c *gc.C, ch <-chan interface{}, event string) { 2100 select { 2101 case <-ch: 2102 c.Fatalf("unexpected " + event) 2103 case <-time.After(coretesting.ShortWait): 2104 } 2105 }