github.com/juju/juju@v0.0.0-20240327075706-a90865de2538/worker/storageprovisioner/volume_ops.go

// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package storageprovisioner

import (
	stdcontext "context"

	"github.com/juju/errors"
	"github.com/juju/names/v5"

	"github.com/juju/juju/core/life"
	"github.com/juju/juju/core/status"
	environscontext "github.com/juju/juju/environs/context"
	"github.com/juju/juju/rpc/params"
	"github.com/juju/juju/storage"
	"github.com/juju/juju/wrench"
)

// createVolumes creates volumes with the specified parameters.
func createVolumes(ctx *context, ops map[names.VolumeTag]*createVolumeOp) error {
	volumeParams := make([]storage.VolumeParams, 0, len(ops))
	for _, op := range ops {
		volumeParams = append(volumeParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.config.StorageDir, volumeParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumes []storage.Volume
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeParams := range paramsBySource {
		ctx.config.Logger.Debugf("creating volumes: %v", volumeParams)
		volumeSource := volumeSources[sourceName]
		validVolumeParams, validationErrors := validateVolumeParams(volumeSource, volumeParams)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: status.Error.String(),
				Info:   err.Error(),
			})
			ctx.config.Logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(volumeParams[i].Tag), err,
			)
		}
		volumeParams = validVolumeParams
		if len(volumeParams) == 0 {
			continue
		}
		results, err := volumeSource.CreateVolumes(ctx.config.CloudCallContextFunc(stdcontext.Background()), volumeParams)
		if err != nil {
			return errors.Annotatef(err, "creating volumes from source %q", sourceName)
		}
		for i, result := range results {
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: status.Attaching.String(),
			})
			entityStatus := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume creation.
				reschedule = append(reschedule, ops[volumeParams[i].Tag])

				// Note: we keep the status as "pending" to indicate
				// that we will retry. When we distinguish between
				// transient and permanent errors, we will set the
				// status to "error" for permanent errors.
				entityStatus.Status = status.Pending.String()
				entityStatus.Info = result.Error.Error()
				ctx.config.Logger.Debugf(
					"failed to create %s: %v",
					names.ReadableString(volumeParams[i].Tag),
					result.Error,
				)
				continue
			}
			volumes = append(volumes, *result.Volume)
			if result.VolumeAttachment != nil {
				entityStatus.Status = status.Attached.String()
				volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
			}
		}
	}
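	// Schedule retries for any volumes whose creation failed, and record
	// the collected volume statuses.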
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if len(volumes) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volumes in the provider,
	// by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing volumes that we fail
	// to record in state.
	errorResults, err := ctx.config.Volumes.SetVolumeInfo(volumesFromStorage(volumes))
	if err != nil {
		return errors.Annotate(err, "publishing volumes to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			ctx.config.Logger.Errorf(
				"publishing volume %s to state: %v",
				volumes[i].Tag.Id(),
				result.Error,
			)
		}
	}
	for _, v := range volumes {
		updateVolume(ctx, v)
	}
	// Note: the storage provisioner that creates a volume is also
	// responsible for creating the volume attachment. It is therefore
	// safe to set the volume attachment info after the volume info,
	// without leading to the possibility of concurrent, duplicate
	// attachments.
	err = setVolumeAttachmentInfo(ctx, volumeAttachments)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}

// attachVolumes creates volume attachments with the specified parameters.
func attachVolumes(ctx *context, ops map[params.MachineStorageId]*attachVolumeOp) error {
	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
	for _, op := range ops {
		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
		ctx.config.StorageDir, volumeAttachmentParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeAttachmentParams := range paramsBySource {
		ctx.config.Logger.Debugf("attaching volumes: %+v", volumeAttachmentParams)
		volumeSource := volumeSources[sourceName]
		if volumeSource == nil {
			// The storage provider does not support dynamic
			// storage; there's nothing for the provisioner
			// to do here.
			continue
		}
		results, err := volumeSource.AttachVolumes(ctx.config.CloudCallContextFunc(stdcontext.Background()), volumeAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "attaching volumes from source %q", sourceName)
		}

		for i, result := range results {
			p := volumeAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    p.Volume.String(),
				Status: status.Attached.String(),
			})
			entityStatus := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume attachment.
				id := params.MachineStorageId{
					MachineTag:    p.Machine.String(),
					AttachmentTag: p.Volume.String(),
				}
				reschedule = append(reschedule, ops[id])

				// Note: we keep the status as "attaching" to
				// indicate that we will retry. When we distinguish
				// between transient and permanent errors, we will
				// set the status to "error" for permanent errors.
				entityStatus.Status = status.Attaching.String()
				entityStatus.Info = result.Error.Error()
				ctx.config.Logger.Warningf(
					"failed to attach %s to %s: %v",
					names.ReadableString(p.Volume),
					names.ReadableString(p.Machine),
					result.Error,
				)
				continue
			}
			volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := createVolumeAttachmentPlans(ctx, volumeAttachments); err != nil {
		return errors.Trace(err)
	}
	if err := setVolumeAttachmentInfo(ctx, volumeAttachments); err != nil {
		return errors.Trace(err)
	}

	return nil
}

// createVolumeAttachmentPlans creates volume attachment plans in state, which
// notify the machine agent of the target instance that something has been
// attached to it.
func createVolumeAttachmentPlans(ctx *context, volumeAttachments []storage.VolumeAttachment) error {
	// NOTE(gsamfira): should we merge this with setVolumeInfo?
	if len(volumeAttachments) == 0 {
		return nil
	}

	volumeAttachmentPlans := make([]params.VolumeAttachmentPlan, len(volumeAttachments))
	for i, val := range volumeAttachments {
		volumeAttachmentPlans[i] = volumeAttachmentPlanFromAttachment(val)
	}

	errorResults, err := ctx.config.Volumes.CreateVolumeAttachmentPlans(volumeAttachmentPlans)
	if err != nil {
		return errors.Annotate(err, "creating volume attachment plans")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			return errors.Annotatef(
				result.Error, "creating volume attachment plan of %s to %s in state",
				names.ReadableString(volumeAttachments[i].Volume),
				names.ReadableString(volumeAttachments[i].Machine),
			)
		}
		// Record the volume attachment in the context.
		id := params.MachineStorageId{
			MachineTag:    volumeAttachmentPlans[i].MachineTag,
			AttachmentTag: volumeAttachmentPlans[i].VolumeTag,
		}
		ctx.volumeAttachments[id] = volumeAttachments[i]
		// removePendingVolumeAttachment(ctx, id)
	}
	return nil
}

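// volumeAttachmentPlanFromAttachment converts a storage.VolumeAttachment
// into its params.VolumeAttachmentPlan representation, defaulting the plan's
// device type to DeviceTypeLocal when the attachment carries no plan info.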
func volumeAttachmentPlanFromAttachment(attachment storage.VolumeAttachment) params.VolumeAttachmentPlan {
	var planInfo params.VolumeAttachmentPlanInfo
	if attachment.PlanInfo != nil {
		planInfo.DeviceAttributes = attachment.PlanInfo.DeviceAttributes
		planInfo.DeviceType = attachment.PlanInfo.DeviceType
	} else {
		planInfo.DeviceType = storage.DeviceTypeLocal
	}
	return params.VolumeAttachmentPlan{
		VolumeTag:  attachment.Volume.String(),
		MachineTag: attachment.Machine.String(),
		Life:       life.Alive,
		PlanInfo:   planInfo,
	}
}

// removeVolumes destroys or releases volumes with the specified parameters.
func removeVolumes(ctx *context, ops map[names.VolumeTag]*removeVolumeOp) error {
	tags := make([]names.VolumeTag, 0, len(ops))
	for tag := range ops {
		tags = append(tags, tag)
	}
	removeVolumeParams, err := removeVolumeParams(ctx, tags)
	if err != nil {
		return errors.Trace(err)
	}
	volumeParams := make([]storage.VolumeParams, len(tags))
	removeVolumeParamsByTag := make(map[names.VolumeTag]params.RemoveVolumeParams)
	for i, args := range removeVolumeParams {
		removeVolumeParamsByTag[tags[i]] = args
		volumeParams[i] = storage.VolumeParams{
			Tag:      tags[i],
			Provider: storage.ProviderType(args.Provider),
		}
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.config.StorageDir, volumeParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var remove []names.Tag
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
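	// removeVolumes invokes the given destroy or release function for the
	// given volume IDs; volumes removed successfully are marked for removal
	// from state, while failures are rescheduled with an error status.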
	removeVolumes := func(tags []names.VolumeTag, ids []string, f func(environscontext.ProviderCallContext, []string) ([]error, error)) error {
		if len(ids) == 0 {
			return nil
		}
		errs, err := f(ctx.config.CloudCallContextFunc(stdcontext.Background()), ids)
		if err != nil {
			return errors.Trace(err)
		}
		for i, err := range errs {
			tag := tags[i]
			if wrench.IsActive("storageprovisioner", "RemoveVolume") {
				err = errors.New("wrench active")
			}
			if err == nil {
				remove = append(remove, tag)
				continue
			}
			// Failed to destroy or release volume; reschedule and update status.
			reschedule = append(reschedule, ops[tag])
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    tag.String(),
				Status: status.Error.String(),
				Info:   errors.Annotate(err, "destroying volume").Error(),
			})
		}
		return nil
	}
	for sourceName, volumeParams := range paramsBySource {
		ctx.config.Logger.Debugf("removing volumes from %q: %v", sourceName, volumeParams)
		volumeSource := volumeSources[sourceName]
		removeTags := make([]names.VolumeTag, len(volumeParams))
		removeParams := make([]params.RemoveVolumeParams, len(volumeParams))
		for i, args := range volumeParams {
			removeTags[i] = args.Tag
			removeParams[i] = removeVolumeParamsByTag[args.Tag]
		}
		destroyTags, destroyIds, releaseTags, releaseIds := partitionRemoveVolumeParams(removeTags, removeParams)
		if err := removeVolumes(destroyTags, destroyIds, volumeSource.DestroyVolumes); err != nil {
			return errors.Trace(err)
		}
		if err := removeVolumes(releaseTags, releaseIds, volumeSource.ReleaseVolumes); err != nil {
			return errors.Trace(err)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing volumes from state")
	}
	return nil
}

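// partitionRemoveVolumeParams splits the remove parameters into those whose
// volumes are to be destroyed and those whose volumes are to be released,
// returning the corresponding tags and provider volume IDs for each set.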
func partitionRemoveVolumeParams(removeTags []names.VolumeTag, removeParams []params.RemoveVolumeParams) (
	destroyTags []names.VolumeTag, destroyIds []string,
	releaseTags []names.VolumeTag, releaseIds []string,
) {
	destroyTags = make([]names.VolumeTag, 0, len(removeParams))
	destroyIds = make([]string, 0, len(removeParams))
	releaseTags = make([]names.VolumeTag, 0, len(removeParams))
	releaseIds = make([]string, 0, len(removeParams))
	for i, args := range removeParams {
		tag := removeTags[i]
		if args.Destroy {
			destroyTags = append(destroyTags, tag)
			destroyIds = append(destroyIds, args.VolumeId)
		} else {
			releaseTags = append(releaseTags, tag)
			releaseIds = append(releaseIds, args.VolumeId)
		}
	}
	return
}

// detachVolumes destroys volume attachments with the specified parameters.
func detachVolumes(ctx *context, ops map[params.MachineStorageId]*detachVolumeOp) error {
	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
	for _, op := range ops {
		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
		ctx.config.StorageDir, volumeAttachmentParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	var remove []params.MachineStorageId
	for sourceName, volumeAttachmentParams := range paramsBySource {
		ctx.config.Logger.Debugf("detaching volumes: %+v", volumeAttachmentParams)
		volumeSource := volumeSources[sourceName]
		if volumeSource == nil {
			// The storage provider does not support dynamic
			// storage; there's nothing for the provisioner
			// to do here.
			continue
		}
		errs, err := volumeSource.DetachVolumes(ctx.config.CloudCallContextFunc(stdcontext.Background()), volumeAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "detaching volumes from source %q", sourceName)
		}
		for i, err := range errs {
			p := volumeAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag: p.Volume.String(),
				// TODO(axw) when we support multiple
				// attachments, we'll have to check if
				// there are any other attachments
				// before saying the status is "detached".
				Status: status.Detached.String(),
			})
			id := params.MachineStorageId{
				MachineTag:    p.Machine.String(),
				AttachmentTag: p.Volume.String(),
			}
			entityStatus := &statuses[len(statuses)-1]
			if wrench.IsActive("storageprovisioner", "DetachVolume") {
				err = errors.New("wrench active")
			}
			if err != nil {
				reschedule = append(reschedule, ops[id])
				entityStatus.Status = status.Detaching.String()
				entityStatus.Info = err.Error()
				ctx.config.Logger.Debugf(
					"failed to detach %s from %s: %v",
					names.ReadableString(p.Volume),
					names.ReadableString(p.Machine),
					err,
				)
				continue
			}
			remove = append(remove, id)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeAttachments(ctx, remove); err != nil {
		return errors.Annotate(err, "removing attachments from state")
	}
	for _, id := range remove {
		delete(ctx.volumeAttachments, id)
	}
	return nil
}

// volumeParamsBySource separates the volume parameters by volume source.
func volumeParamsBySource(
	baseStorageDir string,
	params []storage.VolumeParams,
	registry storage.ProviderRegistry,
) (map[string][]storage.VolumeParams, map[string]storage.VolumeSource, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.
	volumeSources := make(map[string]storage.VolumeSource)
	for _, params := range params {
		sourceName := string(params.Provider)
		if _, ok := volumeSources[sourceName]; ok {
			continue
		}
		volumeSource, err := volumeSource(
			baseStorageDir, sourceName, params.Provider, registry,
		)
		if errors.Cause(err) == errNonDynamic {
			volumeSource = nil
		} else if err != nil {
			return nil, nil, errors.Annotate(err, "getting volume source")
		}
		volumeSources[sourceName] = volumeSource
	}
	paramsBySource := make(map[string][]storage.VolumeParams)
	for _, params := range params {
		sourceName := string(params.Provider)
		volumeSource := volumeSources[sourceName]
		if volumeSource == nil {
			// Ignore nil volume sources; this means that the
			// volume should be created by the machine-provisioner.
			continue
		}
		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
	}
	return paramsBySource, volumeSources, nil
}

// validateVolumeParams validates a collection of volume parameters.
func validateVolumeParams(
	volumeSource storage.VolumeSource, volumeParams []storage.VolumeParams,
) ([]storage.VolumeParams, []error) {
	valid := make([]storage.VolumeParams, 0, len(volumeParams))
	results := make([]error, len(volumeParams))
	for i, params := range volumeParams {
		err := volumeSource.ValidateVolumeParams(params)
		if err == nil {
			valid = append(valid, params)
		}
		results[i] = err
	}
	return valid, results
}

// volumeAttachmentParamsBySource separates the volume attachment parameters by volume source.
func volumeAttachmentParamsBySource(
	baseStorageDir string,
	params []storage.VolumeAttachmentParams,
	registry storage.ProviderRegistry,
) (map[string][]storage.VolumeAttachmentParams, map[string]storage.VolumeSource, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.
	volumeSources := make(map[string]storage.VolumeSource)
	paramsBySource := make(map[string][]storage.VolumeAttachmentParams)
	for _, params := range params {
		sourceName := string(params.Provider)
		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
		if _, ok := volumeSources[sourceName]; ok {
			continue
		}
		volumeSource, err := volumeSource(
			baseStorageDir, sourceName, params.Provider, registry,
		)
		if errors.Cause(err) == errNonDynamic {
			volumeSource = nil
		} else if err != nil {
			return nil, nil, errors.Annotate(err, "getting volume source")
		}
		volumeSources[sourceName] = volumeSource
	}
	return paramsBySource, volumeSources, nil
}

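// setVolumeAttachmentInfo publishes the given volume attachments to state,
// records them in the provisioner context, and clears any corresponding
// pending volume attachments.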
func setVolumeAttachmentInfo(ctx *context, volumeAttachments []storage.VolumeAttachment) error {
	if len(volumeAttachments) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volume attachments in the
	// provider, by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing attachments that we fail
	// to record in state.
	errorResults, err := ctx.config.Volumes.SetVolumeAttachmentInfo(
		volumeAttachmentsFromStorage(volumeAttachments),
	)
	if err != nil {
		return errors.Annotate(err, "publishing volume attachments to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			return errors.Annotatef(
				result.Error, "publishing attachment of %s to %s to state",
				names.ReadableString(volumeAttachments[i].Volume),
				names.ReadableString(volumeAttachments[i].Machine),
			)
		}
		// Record the volume attachment in the context.
		id := params.MachineStorageId{
			MachineTag:    volumeAttachments[i].Machine.String(),
			AttachmentTag: volumeAttachments[i].Volume.String(),
		}
		ctx.volumeAttachments[id] = volumeAttachments[i]
		removePendingVolumeAttachment(ctx, id)
	}
	return nil
}

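// createVolumeOp is a schedulable operation to create a volume with the
// given parameters, keyed by the volume tag.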
type createVolumeOp struct {
	exponentialBackoff
	args storage.VolumeParams
}

func (op *createVolumeOp) key() interface{} {
	return op.args.Tag
}

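// removeVolumeOp is a schedulable operation to destroy or release the
// volume with the given tag.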
type removeVolumeOp struct {
	exponentialBackoff
	tag names.VolumeTag
}

func (op *removeVolumeOp) key() interface{} {
	return op.tag
}

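// attachVolumeOp is a schedulable operation to attach a volume to a
// machine, keyed by the machine/volume attachment ID.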
type attachVolumeOp struct {
	exponentialBackoff
	args storage.VolumeAttachmentParams
}

func (op *attachVolumeOp) key() interface{} {
	return params.MachineStorageId{
		MachineTag:    op.args.Machine.String(),
		AttachmentTag: op.args.Volume.String(),
	}
}

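// detachVolumeOp is a schedulable operation to detach a volume from a
// machine, keyed by the machine/volume attachment ID.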
type detachVolumeOp struct {
	exponentialBackoff
	args storage.VolumeAttachmentParams
}

func (op *detachVolumeOp) key() interface{} {
	return params.MachineStorageId{
		MachineTag:    op.args.Machine.String(),
		AttachmentTag: op.args.Volume.String(),
	}
}