github.com/niedbalski/juju@v0.0.0-20190215020005-8ff100488e47/worker/storageprovisioner/volume_events.go

// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package storageprovisioner

import (
	"github.com/juju/errors"
	"gopkg.in/juju/names.v2"

	"github.com/juju/juju/apiserver/params"
	"github.com/juju/juju/core/instance"
	"github.com/juju/juju/core/watcher"
	"github.com/juju/juju/storage"
	"github.com/juju/juju/storage/plans"
)

// volumesChanged is called when the lifecycle states of the volumes
// with the provided IDs have been seen to have changed.
func volumesChanged(ctx *context, changes []string) error {
	tags := make([]names.Tag, len(changes))
	for i, change := range changes {
		tags[i] = names.NewVolumeTag(change)
	}
	alive, dying, dead, err := storageEntityLife(ctx, tags)
	if err != nil {
		return errors.Trace(err)
	}
	logger.Debugf("volumes alive: %v, dying: %v, dead: %v", alive, dying, dead)
	if err := processDyingVolumes(ctx, dying); err != nil {
		return errors.Annotate(err, "processing dying volumes")
	}
	if len(alive)+len(dead) == 0 {
		return nil
	}

	// Get volume information for alive and dead volumes, so
	// we can provision/deprovision.
	volumeTags := make([]names.VolumeTag, 0, len(alive)+len(dead))
	for _, tag := range alive {
		volumeTags = append(volumeTags, tag.(names.VolumeTag))
	}
	for _, tag := range dead {
		volumeTags = append(volumeTags, tag.(names.VolumeTag))
	}
	volumeResults, err := ctx.config.Volumes.Volumes(volumeTags)
	if err != nil {
		return errors.Annotatef(err, "getting volume information")
	}
	if err := processDeadVolumes(ctx, volumeTags[len(alive):], volumeResults[len(alive):]); err != nil {
		return errors.Annotate(err, "deprovisioning volumes")
	}
	if err := processAliveVolumes(ctx, alive, volumeResults[:len(alive)]); err != nil {
		return errors.Annotate(err, "provisioning volumes")
	}
	return nil
}

// sortVolumeAttachmentPlans fetches the volume attachment plans for the
// given IDs and partitions them by lifecycle state.
func sortVolumeAttachmentPlans(ctx *context, ids []params.MachineStorageId) (
	alive, dying, dead []params.VolumeAttachmentPlanResult, err error) {
	plans, err := ctx.config.Volumes.VolumeAttachmentPlans(ids)
	if err != nil {
		return nil, nil, nil, errors.Trace(err)
	}
	logger.Debugf("Found plans: %v", plans)
	for _, plan := range plans {
		switch plan.Result.Life {
		case params.Alive:
			alive = append(alive, plan)
		case params.Dying:
			dying = append(dying, plan)
		case params.Dead:
			dead = append(dead, plan)
		}
	}
	return
}

// volumeAttachmentPlansChanged is called when the volume attachment plans
// with the provided IDs have been seen to have changed.
func volumeAttachmentPlansChanged(ctx *context, watcherIds []watcher.MachineStorageId) error {
	logger.Debugf("Got machine storage ids: %v", watcherIds)
	ids := copyMachineStorageIds(watcherIds)
	alive, dying, dead, err := sortVolumeAttachmentPlans(ctx, ids)
	if err != nil {
		return errors.Trace(err)
	}
	logger.Debugf("volume attachment plans alive: %v, dying: %v, dead: %v", alive, dying, dead)

	if err := processAliveVolumePlans(ctx, alive); err != nil {
		return err
	}

	if err := processDyingVolumePlans(ctx, dying); err != nil {
		return err
	}
	return nil
}

// processAliveVolumePlans attaches the volumes for Alive attachment plans,
// publishes the resulting block device information to state, and refreshes
// the cached block devices for the affected volumes.
func processAliveVolumePlans(ctx *context, volumePlans []params.VolumeAttachmentPlanResult) error {
	volumeAttachmentPlans := make([]params.VolumeAttachmentPlan, len(volumePlans))
	volumeTags := make([]names.VolumeTag, len(volumePlans))
	for i, val := range volumePlans {
		volumeAttachmentPlans[i] = val.Result
		tag, err := names.ParseVolumeTag(val.Result.VolumeTag)
		if err != nil {
			return errors.Trace(err)
		}
		volumeTags[i] = tag
	}

	for idx, val := range volumeAttachmentPlans {
		volPlan, err := plans.PlanByType(val.PlanInfo.DeviceType)
		if err != nil {
			if !errors.IsNotFound(err) {
				return errors.Trace(err)
			}
			continue
		}
		if blockDeviceInfo, err := volPlan.AttachVolume(val.PlanInfo.DeviceAttributes); err != nil {
			return errors.Trace(err)
		} else {
			volumeAttachmentPlans[idx].BlockDevice = blockDeviceInfo
		}
	}

	results, err := ctx.config.Volumes.SetVolumeAttachmentPlanBlockInfo(volumeAttachmentPlans)
	if err != nil {
		return errors.Trace(err)
	}
	for _, result := range results {
		if result.Error != nil {
			return errors.Errorf("failed to publish block info to state: %s", result.Error)
		}
	}
	return refreshVolumeBlockDevices(ctx, volumeTags)
}

// processDyingVolumePlans detaches the volumes for Dying attachment plans
// and removes the plans from state.
func processDyingVolumePlans(ctx *context, volumePlans []params.VolumeAttachmentPlanResult) error {
	ids := volumePlansToMachineIds(volumePlans)
	for _, val := range volumePlans {
		volPlan, err := plans.PlanByType(val.Result.PlanInfo.DeviceType)
		if err != nil {
			if !errors.IsNotFound(err) {
				return errors.Trace(err)
			}
			continue
		}
		if err := volPlan.DetachVolume(val.Result.PlanInfo.DeviceAttributes); err != nil {
			return errors.Trace(err)
		}
	}
	results, err := ctx.config.Volumes.RemoveVolumeAttachmentPlan(ids)
	if err != nil {
		return err
	}
	for _, result := range results {
		if result.Error != nil {
			return errors.Annotate(result.Error, "removing volume plan")
		}
	}
	return nil
}

// volumePlansToMachineIds extracts the machine/volume storage IDs from the
// given volume attachment plan results.
func volumePlansToMachineIds(plans []params.VolumeAttachmentPlanResult) []params.MachineStorageId {
	storageIds := make([]params.MachineStorageId, len(plans))
	for i, plan := range plans {
		storageIds[i] = params.MachineStorageId{
			MachineTag:    plan.Result.MachineTag,
			AttachmentTag: plan.Result.VolumeTag,
		}
	}
	return storageIds
}

// volumeAttachmentsChanged is called when the lifecycle states of the volume
// attachments with the provided IDs have been seen to have changed.
func volumeAttachmentsChanged(ctx *context, watcherIds []watcher.MachineStorageId) error {
	ids := copyMachineStorageIds(watcherIds)
	alive, dying, dead, err := attachmentLife(ctx, ids)
	if err != nil {
		return errors.Trace(err)
	}
	logger.Debugf("volume attachments alive: %v, dying: %v, dead: %v", alive, dying, dead)
	if len(dead) != 0 {
		// We should not see dead volume attachments;
		// attachments go directly from Dying to removed.
		logger.Warningf("unexpected dead volume attachments: %v", dead)
	}
	if len(alive)+len(dying) == 0 {
		return nil
	}

	// Get volume information for alive and dying volume attachments, so
	// we can attach/detach.
	ids = append(alive, dying...)
	volumeAttachmentResults, err := ctx.config.Volumes.VolumeAttachments(ids)
	if err != nil {
		return errors.Annotatef(err, "getting volume attachment information")
	}

	// Deprovision Dying volume attachments.
	dyingVolumeAttachmentResults := volumeAttachmentResults[len(alive):]
	if err := processDyingVolumeAttachments(ctx, dying, dyingVolumeAttachmentResults); err != nil {
		return errors.Annotate(err, "deprovisioning volume attachments")
	}

	// Provision Alive volume attachments.
	aliveVolumeAttachmentResults := volumeAttachmentResults[:len(alive)]
	if err := processAliveVolumeAttachments(ctx, alive, aliveVolumeAttachmentResults); err != nil {
		return errors.Annotate(err, "provisioning volume attachments")
	}

	return nil
}

// processDyingVolumes processes the VolumeResults for Dying volumes,
// removing them from provisioning-pending as necessary.
func processDyingVolumes(ctx *context, tags []names.Tag) error {
	if ctx.isApplicationKind() {
		// For the application kind we only care about Dead volumes.
		return nil
	}
	for _, tag := range tags {
		removePendingVolume(ctx, tag.(names.VolumeTag))
	}
	return nil
}

// updateVolume updates the context with the given volume info.
func updateVolume(ctx *context, info storage.Volume) {
	ctx.volumes[info.Tag] = info
	for id, params := range ctx.incompleteVolumeAttachmentParams {
		if params.VolumeId == "" && id.AttachmentTag == info.Tag.String() {
			params.VolumeId = info.VolumeId
			updatePendingVolumeAttachment(ctx, id, params)
		}
	}
}

// updatePendingVolume adds the given volume params to either the incomplete
// set or the schedule. If the params are incomplete due to a missing instance
// ID, updatePendingVolume will request that the machine be watched so its
// instance ID can be learned.
func updatePendingVolume(ctx *context, params storage.VolumeParams) {
	if params.Attachment == nil {
		// NOTE(axw) this would only happen if the model is
		// in an incoherent state; we should never have an
		// alive, unprovisioned, and unattached volume.
		logger.Warningf(
			"%s is in an incoherent state, ignoring",
			names.ReadableString(params.Tag),
		)
		return
	}
	if params.Attachment.InstanceId == "" {
		watchMachine(ctx, params.Attachment.Machine.(names.MachineTag))
		ctx.incompleteVolumeParams[params.Tag] = params
	} else {
		delete(ctx.incompleteVolumeParams, params.Tag)
		scheduleOperations(ctx, &createVolumeOp{args: params})
	}
}

// removePendingVolume removes the specified pending volume from the
// incomplete set and/or the schedule if it exists there.
func removePendingVolume(ctx *context, tag names.VolumeTag) {
	delete(ctx.incompleteVolumeParams, tag)
	ctx.schedule.Remove(tag)
}

// updatePendingVolumeAttachment adds the given volume attachment params to
// either the incomplete set or the schedule. If the params are incomplete
// due to a missing instance ID, updatePendingVolumeAttachment will request
// that the machine be watched so its instance ID can be learned.
func updatePendingVolumeAttachment(
	ctx *context,
	id params.MachineStorageId,
	params storage.VolumeAttachmentParams,
) {
	if params.InstanceId == "" {
		watchMachine(ctx, params.Machine.(names.MachineTag))
	} else if params.VolumeId != "" {
		delete(ctx.incompleteVolumeAttachmentParams, id)
		scheduleOperations(ctx, &attachVolumeOp{args: params})
		return
	}
	ctx.incompleteVolumeAttachmentParams[id] = params
}

// removePendingVolumeAttachment removes the specified pending volume
// attachment from the incomplete set and/or the schedule if it exists
// there.
func removePendingVolumeAttachment(ctx *context, id params.MachineStorageId) {
	delete(ctx.incompleteVolumeAttachmentParams, id)
	ctx.schedule.Remove(id)
}

// processDeadVolumes processes the VolumeResults for Dead volumes,
// deprovisioning volumes and removing from state as necessary.
func processDeadVolumes(ctx *context, tags []names.VolumeTag, volumeResults []params.VolumeResult) error {
	for _, tag := range tags {
		removePendingVolume(ctx, tag)
	}
	var destroy []names.VolumeTag
	var remove []names.Tag
	for i, result := range volumeResults {
		tag := tags[i]
		if result.Error == nil {
			logger.Debugf("volume %s is provisioned, queuing for deprovisioning", tag.Id())
			volume, err := volumeFromParams(result.Result)
			if err != nil {
				return errors.Annotate(err, "getting volume info")
			}
			updateVolume(ctx, volume)
			destroy = append(destroy, tag)
			continue
		}
		if params.IsCodeNotProvisioned(result.Error) {
			logger.Debugf("volume %s is not provisioned, queuing for removal", tag.Id())
			remove = append(remove, tag)
			continue
		}
		return errors.Annotatef(result.Error, "getting volume information for volume %s", tag.Id())
	}
	if len(destroy) > 0 {
		ops := make([]scheduleOp, len(destroy))
		for i, tag := range destroy {
			ops[i] = &removeVolumeOp{tag: tag}
		}
		scheduleOperations(ctx, ops...)
	}
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing volumes from state")
	}
	return nil
}

// processDyingVolumeAttachments processes the VolumeAttachmentResults for
// Dying volume attachments, detaching volumes and updating state as necessary.
func processDyingVolumeAttachments(
	ctx *context,
	ids []params.MachineStorageId,
	volumeAttachmentResults []params.VolumeAttachmentResult,
) error {
	for _, id := range ids {
		removePendingVolumeAttachment(ctx, id)
	}
	detach := make([]params.MachineStorageId, 0, len(ids))
	remove := make([]params.MachineStorageId, 0, len(ids))
	for i, result := range volumeAttachmentResults {
		id := ids[i]
		if result.Error == nil {
			detach = append(detach, id)
			continue
		}
		if params.IsCodeNotProvisioned(result.Error) {
			remove = append(remove, id)
			continue
		}
		return errors.Annotatef(result.Error, "getting information for volume attachment %v", id)
	}
	if len(detach) > 0 {
		attachmentParams, err := volumeAttachmentParams(ctx, detach)
		if err != nil {
			return errors.Trace(err)
		}
		ops := make([]scheduleOp, len(attachmentParams))
		for i, p := range attachmentParams {
			ops[i] = &detachVolumeOp{args: p}
		}
		scheduleOperations(ctx, ops...)
	}
	if err := removeAttachments(ctx, remove); err != nil {
		return errors.Annotate(err, "removing attachments from state")
	}
	for _, id := range remove {
		delete(ctx.volumeAttachments, id)
	}
	return nil
}

// processAliveVolumes processes the VolumeResults for Alive volumes,
// provisioning volumes and setting the info in state as necessary.
func processAliveVolumes(ctx *context, tags []names.Tag, volumeResults []params.VolumeResult) error {
	if ctx.isApplicationKind() {
		// For the application kind we only care about Dead volumes.
		return nil
	}

	// Filter out the already-provisioned volumes.
	pending := make([]names.VolumeTag, 0, len(tags))
	for i, result := range volumeResults {
		volumeTag := tags[i].(names.VolumeTag)
		if result.Error == nil {
			// Volume is already provisioned: skip.
			logger.Debugf("volume %q is already provisioned, nothing to do", tags[i].Id())
			volume, err := volumeFromParams(result.Result)
			if err != nil {
				return errors.Annotate(err, "getting volume info")
			}
			updateVolume(ctx, volume)
			removePendingVolume(ctx, volumeTag)
			continue
		}
		if !params.IsCodeNotProvisioned(result.Error) {
			return errors.Annotatef(
				result.Error, "getting volume information for volume %q", tags[i].Id(),
			)
		}
		// The volume has not yet been provisioned, so record its tag
		// to enquire about parameters below.
		pending = append(pending, volumeTag)
	}
	if len(pending) == 0 {
		return nil
	}
	volumeParams, err := volumeParams(ctx, pending)
	if err != nil {
		return errors.Annotate(err, "getting volume params")
	}
	for _, params := range volumeParams {
		if params.Attachment != nil && params.Attachment.Machine.Kind() != names.MachineTagKind {
			logger.Debugf("not queuing volume for non-machine %v", params.Attachment.Machine)
			continue
		}
		updatePendingVolume(ctx, params)
	}
	return nil
}

// processAliveVolumeAttachments processes the VolumeAttachmentResults
// for Alive volume attachments, attaching volumes and setting the info
// in state as necessary.
func processAliveVolumeAttachments(
	ctx *context,
	ids []params.MachineStorageId,
	volumeAttachmentResults []params.VolumeAttachmentResult,
) error {
	// Filter out the already-attached.
	pending := make([]params.MachineStorageId, 0, len(ids))
	for i, result := range volumeAttachmentResults {
		if result.Error == nil {
			// Volume attachment is already provisioned: if we
			// didn't (re)attach in this session, then we must
			// do so now.
			action := "nothing to do"
			if _, ok := ctx.volumeAttachments[ids[i]]; !ok {
				// Not yet (re)attached in this session.
				pending = append(pending, ids[i])
				action = "will reattach"
			}
			logger.Debugf(
				"%s is already attached to %s, %s",
				ids[i].AttachmentTag, ids[i].MachineTag, action,
			)
			removePendingVolumeAttachment(ctx, ids[i])
			continue
		}
		if !params.IsCodeNotProvisioned(result.Error) {
			return errors.Annotatef(
				result.Error, "getting information for attachment %v", ids[i],
			)
		}
		// The volume attachment has not yet been provisioned, so
		// record its ID to enquire about parameters below.
		pending = append(pending, ids[i])
	}
	if len(pending) == 0 {
		return nil
	}
	params, err := volumeAttachmentParams(ctx, pending)
	if err != nil {
		return errors.Trace(err)
	}
	for i, params := range params {
		if params.Machine.Kind() != names.MachineTagKind {
			logger.Debugf("not queuing volume attachment for non-machine %v", params.Machine)
			continue
		}
		if volume, ok := ctx.volumes[params.Volume]; ok {
			params.VolumeId = volume.VolumeId
		}
		updatePendingVolumeAttachment(ctx, pending[i], params)
	}
	return nil
}

// volumeAttachmentParams obtains the specified attachments' parameters.
func volumeAttachmentParams(
	ctx *context, ids []params.MachineStorageId,
) ([]storage.VolumeAttachmentParams, error) {
	paramsResults, err := ctx.config.Volumes.VolumeAttachmentParams(ids)
	if err != nil {
		return nil, errors.Annotate(err, "getting volume attachment params")
	}
	attachmentParams := make([]storage.VolumeAttachmentParams, len(ids))
	for i, result := range paramsResults {
		if result.Error != nil {
			return nil, errors.Annotate(result.Error, "getting volume attachment parameters")
		}
		params, err := volumeAttachmentParamsFromParams(result.Result)
		if err != nil {
			return nil, errors.Annotate(err, "getting volume attachment parameters")
		}
		attachmentParams[i] = params
	}
	return attachmentParams, nil
}

// volumeParams obtains the specified volumes' parameters.
func volumeParams(ctx *context, tags []names.VolumeTag) ([]storage.VolumeParams, error) {
	paramsResults, err := ctx.config.Volumes.VolumeParams(tags)
	if err != nil {
		return nil, errors.Annotate(err, "getting volume params")
	}
	allParams := make([]storage.VolumeParams, len(tags))
	for i, result := range paramsResults {
		if result.Error != nil {
			return nil, errors.Annotate(result.Error, "getting volume parameters")
		}
		params, err := volumeParamsFromParams(result.Result)
		if err != nil {
			return nil, errors.Annotate(err, "getting volume parameters")
		}
		allParams[i] = params
	}
	return allParams, nil
}

// removeVolumeParams obtains the specified volumes' destruction parameters.
func removeVolumeParams(ctx *context, tags []names.VolumeTag) ([]params.RemoveVolumeParams, error) {
	paramsResults, err := ctx.config.Volumes.RemoveVolumeParams(tags)
	if err != nil {
		return nil, errors.Annotate(err, "getting volume params")
	}
	allParams := make([]params.RemoveVolumeParams, len(tags))
	for i, result := range paramsResults {
		if result.Error != nil {
			return nil, errors.Annotate(result.Error, "getting volume removal parameters")
		}
		allParams[i] = result.Result
	}
	return allParams, nil
}

// volumesFromStorage converts storage.Volume values to their params
// representation.
func volumesFromStorage(in []storage.Volume) []params.Volume {
	out := make([]params.Volume, len(in))
	for i, v := range in {
		out[i] = params.Volume{
			v.Tag.String(),
			params.VolumeInfo{
				v.VolumeId,
				v.HardwareId,
				v.WWN,
				"", // pool
				v.Size,
				v.Persistent,
			},
		}
	}
	return out
}

// volumeAttachmentsFromStorage converts storage.VolumeAttachment values to
// their params representation.
func volumeAttachmentsFromStorage(in []storage.VolumeAttachment) []params.VolumeAttachment {
	out := make([]params.VolumeAttachment, len(in))
	for i, v := range in {
		planInfo := &params.VolumeAttachmentPlanInfo{}
		if v.PlanInfo != nil {
			planInfo.DeviceType = v.PlanInfo.DeviceType
			planInfo.DeviceAttributes = v.PlanInfo.DeviceAttributes
		} else {
			planInfo = nil
		}
		out[i] = params.VolumeAttachment{
			v.Volume.String(),
			v.Machine.String(),
			params.VolumeAttachmentInfo{
				v.DeviceName,
				v.DeviceLink,
				v.BusAddress,
				v.ReadOnly,
				planInfo,
			},
		}
	}
	return out
}

// volumeFromParams converts a params.Volume to a storage.Volume.
func volumeFromParams(in params.Volume) (storage.Volume, error) {
	volumeTag, err := names.ParseVolumeTag(in.VolumeTag)
	if err != nil {
		return storage.Volume{}, errors.Trace(err)
	}
	return storage.Volume{
		volumeTag,
		storage.VolumeInfo{
			in.Info.VolumeId,
			in.Info.HardwareId,
			in.Info.WWN,
			in.Info.Size,
			in.Info.Persistent,
		},
	}, nil
}

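// For illustration, a minimal usage sketch of volumeFromParams with
// hypothetical values (the worker itself feeds it results obtained from
// ctx.config.Volumes); "volume-0" and "vol-12345" are illustrative only:
//
//	vol, err := volumeFromParams(params.Volume{
//		VolumeTag: "volume-0",
//		Info: params.VolumeInfo{
//			VolumeId: "vol-12345",
//			Size:     1024,
//		},
//	})
//	// On success, vol.Tag.Id() == "0" and vol.VolumeId == "vol-12345".
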
// volumeParamsFromParams converts volume provisioning parameters from their
// params representation, checking that any attachment refers to the same
// volume and storage provider as the volume itself.
func volumeParamsFromParams(in params.VolumeParams) (storage.VolumeParams, error) {
	volumeTag, err := names.ParseVolumeTag(in.VolumeTag)
	if err != nil {
		return storage.VolumeParams{}, errors.Trace(err)
	}
	providerType := storage.ProviderType(in.Provider)

	var attachment *storage.VolumeAttachmentParams
	if in.Attachment != nil {
		if in.Attachment.Provider != in.Provider {
			return storage.VolumeParams{}, errors.Errorf(
				"storage provider mismatch: volume (%q), attachment (%q)",
				in.Provider, in.Attachment.Provider,
			)
		}
		if in.Attachment.VolumeTag != in.VolumeTag {
			return storage.VolumeParams{}, errors.Errorf(
				"volume tag mismatch: volume (%q), attachment (%q)",
				in.VolumeTag, in.Attachment.VolumeTag,
			)
		}
		hostTag, err := names.ParseTag(in.Attachment.MachineTag)
		if err != nil {
			return storage.VolumeParams{}, errors.Annotate(
				err, "parsing attachment machine tag",
			)
		}
		attachment = &storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				Provider:   providerType,
				Machine:    hostTag,
				InstanceId: instance.Id(in.Attachment.InstanceId),
				ReadOnly:   in.Attachment.ReadOnly,
			},
			Volume: volumeTag,
		}
	}
	return storage.VolumeParams{
		volumeTag,
		in.Size,
		providerType,
		in.Attributes,
		in.Tags,
		attachment,
	}, nil
}

// volumeAttachmentParamsFromParams converts volume attachment parameters
// from their params representation.
func volumeAttachmentParamsFromParams(in params.VolumeAttachmentParams) (storage.VolumeAttachmentParams, error) {
	hostTag, err := names.ParseTag(in.MachineTag)
	if err != nil {
		return storage.VolumeAttachmentParams{}, errors.Trace(err)
	}
	volumeTag, err := names.ParseVolumeTag(in.VolumeTag)
	if err != nil {
		return storage.VolumeAttachmentParams{}, errors.Trace(err)
	}
	return storage.VolumeAttachmentParams{
		AttachmentParams: storage.AttachmentParams{
			Provider:   storage.ProviderType(in.Provider),
			Machine:    hostTag,
			InstanceId: instance.Id(in.InstanceId),
			ReadOnly:   in.ReadOnly,
		},
		Volume:   volumeTag,
		VolumeId: in.VolumeId,
	}, nil
}
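
// Similarly, a minimal usage sketch of volumeAttachmentParamsFromParams with
// hypothetical tag, instance and provider values (in the worker these come
// from ctx.config.Volumes.VolumeAttachmentParams results):
//
//	p, err := volumeAttachmentParamsFromParams(params.VolumeAttachmentParams{
//		MachineTag: "machine-1",
//		VolumeTag:  "volume-0",
//		InstanceId: "i-0123456",
//		Provider:   "ebs",
//		VolumeId:   "vol-12345",
//		ReadOnly:   true,
//	})
//	// On success, p.Machine.Id() == "1", p.Volume.Id() == "0",
//	// p.Provider == storage.ProviderType("ebs"), and p.VolumeId == "vol-12345".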