github.com/cloud-green/juju@v0.0.0-20151002100041-a00291338d3d/worker/storageprovisioner/volume_ops.go

// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package storageprovisioner

import (
	"github.com/juju/errors"
	"github.com/juju/names"

	"github.com/juju/juju/apiserver/params"
	"github.com/juju/juju/environs/config"
	"github.com/juju/juju/storage"
)

// createVolumes creates volumes with the specified parameters.
func createVolumes(ctx *context, ops map[names.VolumeTag]*createVolumeOp) error {
	volumeParams := make([]storage.VolumeParams, 0, len(ops))
	for _, op := range ops {
		volumeParams = append(volumeParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.environConfig, ctx.storageDir, volumeParams,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumes []storage.Volume
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeParams := range paramsBySource {
		logger.Debugf("creating volumes: %v", volumeParams)
		volumeSource := volumeSources[sourceName]
		results, err := volumeSource.CreateVolumes(volumeParams)
		if err != nil {
			return errors.Annotatef(err, "creating volumes from source %q", sourceName)
		}
		for i, result := range results {
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: params.StatusAttaching,
			})
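			// Take the address of the status entry just appended so the
			// result handling below can amend it in place.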
			status := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume creation.
				reschedule = append(reschedule, ops[volumeParams[i].Tag])

				// Note: we keep the status as "pending" to indicate
				// that we will retry. When we distinguish between
				// transient and permanent errors, we will set the
				// status to "error" for permanent errors.
				status.Status = params.StatusPending
				status.Info = result.Error.Error()
				logger.Debugf(
					"failed to create %s: %v",
					names.ReadableString(volumeParams[i].Tag),
					result.Error,
				)
				continue
			}
			volumes = append(volumes, *result.Volume)
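			// Some volume sources attach at creation time; if the source
			// returned an attachment, record it and mark the volume attached.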
			if result.VolumeAttachment != nil {
				status.Status = params.StatusAttached
				volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
			}
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if len(volumes) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volumes in the provider,
	// by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing volumes that we fail
	// to record in state.
	errorResults, err := ctx.volumeAccessor.SetVolumeInfo(volumesFromStorage(volumes))
	if err != nil {
		return errors.Annotate(err, "publishing volumes to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			logger.Errorf(
				"publishing volume %s to state: %v",
				volumes[i].Tag.Id(),
				result.Error,
			)
		}
	}
	for _, v := range volumes {
		updateVolume(ctx, v)
	}
	// Note: the storage provisioner that creates a volume is also
	// responsible for creating the volume attachment. It is therefore
	// safe to set the volume attachment info after the volume info,
	// without leading to the possibility of concurrent, duplicate
	// attachments.
	err = setVolumeAttachmentInfo(ctx, volumeAttachments)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}

// attachVolumes creates volume attachments with the specified parameters.
func attachVolumes(ctx *context, ops map[params.MachineStorageId]*attachVolumeOp) error {
	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
	for _, op := range ops {
		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
		ctx.environConfig, ctx.storageDir, volumeAttachmentParams,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeAttachmentParams := range paramsBySource {
		logger.Debugf("attaching volumes: %+v", volumeAttachmentParams)
		volumeSource := volumeSources[sourceName]
		results, err := volumeSource.AttachVolumes(volumeAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "attaching volumes from source %q", sourceName)
		}
		for i, result := range results {
			p := volumeAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    p.Volume.String(),
				Status: params.StatusAttached,
			})
			status := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume attachment.
				id := params.MachineStorageId{
					MachineTag:    p.Machine.String(),
					AttachmentTag: p.Volume.String(),
				}
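				// The machine/volume pair is the key of the ops map,
				// so ops[id] is the pending operation to retry.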
				reschedule = append(reschedule, ops[id])

				// Note: we keep the status as "attaching" to
				// indicate that we will retry. When we distinguish
				// between transient and permanent errors, we will
				// set the status to "error" for permanent errors.
				status.Status = params.StatusAttaching
				status.Info = result.Error.Error()
				logger.Debugf(
					"failed to attach %s to %s: %v",
					names.ReadableString(p.Volume),
					names.ReadableString(p.Machine),
					result.Error,
				)
				continue
			}
			volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := setVolumeAttachmentInfo(ctx, volumeAttachments); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// destroyVolumes destroys volumes with the specified parameters.
func destroyVolumes(ctx *context, ops map[names.VolumeTag]*destroyVolumeOp) error {
	tags := make([]names.VolumeTag, 0, len(ops))
	for tag := range ops {
		tags = append(tags, tag)
	}
	volumeParams, err := volumeParams(ctx, tags)
	if err != nil {
		return errors.Trace(err)
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.environConfig, ctx.storageDir, volumeParams,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var remove []names.Tag
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	for sourceName, volumeParams := range paramsBySource {
		logger.Debugf("destroying volumes from %q: %v", sourceName, volumeParams)
		volumeSource := volumeSources[sourceName]
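		// Map each volume tag to the provider-assigned volume ID
		// recorded in the context; destruction is by volume ID.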
		volumeIds := make([]string, len(volumeParams))
		for i, volumeParams := range volumeParams {
			volume, ok := ctx.volumes[volumeParams.Tag]
			if !ok {
				return errors.NotFoundf("volume %s", volumeParams.Tag.Id())
			}
			volumeIds[i] = volume.VolumeId
		}
		errs, err := volumeSource.DestroyVolumes(volumeIds)
		if err != nil {
			return errors.Trace(err)
		}
		for i, err := range errs {
			tag := volumeParams[i].Tag
			if err == nil {
				remove = append(remove, tag)
				continue
			}
			// Failed to destroy volume; reschedule and update status.
			reschedule = append(reschedule, ops[tag])
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    tag.String(),
				Status: params.StatusDestroying,
				Info:   err.Error(),
			})
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing volumes from state")
	}
	return nil
}

// detachVolumes destroys volume attachments with the specified parameters.
func detachVolumes(ctx *context, ops map[params.MachineStorageId]*detachVolumeOp) error {
	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
	for _, op := range ops {
		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
		ctx.environConfig, ctx.storageDir, volumeAttachmentParams,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	var remove []params.MachineStorageId
	for sourceName, volumeAttachmentParams := range paramsBySource {
		logger.Debugf("detaching volumes: %+v", volumeAttachmentParams)
		volumeSource := volumeSources[sourceName]
		errs, err := volumeSource.DetachVolumes(volumeAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "detaching volumes from source %q", sourceName)
		}
		for i, err := range errs {
			p := volumeAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag: p.Volume.String(),
				// TODO(axw) when we support multiple
				// attachments, we'll have to check if
				// there are any other attachments
				// before setting the status to "detached".
				Status: params.StatusDetached,
			})
			id := params.MachineStorageId{
				MachineTag:    p.Machine.String(),
				AttachmentTag: p.Volume.String(),
			}
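			// The machine/volume pair identifies the attachment, both for
			// rescheduling a failed detachment and for removing a
			// successful one from state.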
			status := &statuses[len(statuses)-1]
			if err != nil {
				reschedule = append(reschedule, ops[id])
				status.Status = params.StatusDetaching
				status.Info = err.Error()
				logger.Debugf(
					"failed to detach %s from %s: %v",
					names.ReadableString(p.Volume),
					names.ReadableString(p.Machine),
					err,
				)
				continue
			}
			remove = append(remove, id)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeAttachments(ctx, remove); err != nil {
		return errors.Annotate(err, "removing attachments from state")
	}
	for _, id := range remove {
		delete(ctx.volumeAttachments, id)
	}
	return nil
}

// volumeParamsBySource separates the volume parameters by volume source.
func volumeParamsBySource(
	environConfig *config.Config,
	baseStorageDir string,
	params []storage.VolumeParams,
) (map[string][]storage.VolumeParams, map[string]storage.VolumeSource, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.
	volumeSources := make(map[string]storage.VolumeSource)
	for _, params := range params {
		sourceName := string(params.Provider)
		if _, ok := volumeSources[sourceName]; ok {
			continue
		}
		volumeSource, err := volumeSource(
			environConfig, baseStorageDir, sourceName, params.Provider,
		)
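		// A provider that is not dynamic cannot have its volumes created
		// by this worker; record a nil source so they are skipped below.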
		if errors.Cause(err) == errNonDynamic {
			volumeSource = nil
		} else if err != nil {
			return nil, nil, errors.Annotate(err, "getting volume source")
		}
		volumeSources[sourceName] = volumeSource
	}
	paramsBySource := make(map[string][]storage.VolumeParams)
	for _, params := range params {
		sourceName := string(params.Provider)
		volumeSource := volumeSources[sourceName]
		if volumeSource == nil {
			// Ignore nil volume sources; this means that the
			// volume should be created by the machine-provisioner.
			continue
		}
		err := volumeSource.ValidateVolumeParams(params)
		switch errors.Cause(err) {
		case nil:
			paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
		default:
			return nil, nil, errors.Annotatef(err, "invalid parameters for volume %s", params.Tag.Id())
		}
	}
	return paramsBySource, volumeSources, nil
}

// volumeAttachmentParamsBySource separates the volume attachment parameters by volume source.
func volumeAttachmentParamsBySource(
	environConfig *config.Config,
	baseStorageDir string,
	params []storage.VolumeAttachmentParams,
) (map[string][]storage.VolumeAttachmentParams, map[string]storage.VolumeSource, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.
	volumeSources := make(map[string]storage.VolumeSource)
	paramsBySource := make(map[string][]storage.VolumeAttachmentParams)
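	// Unlike volume creation, every attachment is grouped by source name
	// up front; the source itself is resolved only once per provider.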
	for _, params := range params {
		sourceName := string(params.Provider)
		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
		if _, ok := volumeSources[sourceName]; ok {
			continue
		}
		volumeSource, err := volumeSource(
			environConfig, baseStorageDir, sourceName, params.Provider,
		)
		if err != nil {
			return nil, nil, errors.Annotate(err, "getting volume source")
		}
		volumeSources[sourceName] = volumeSource
	}
	return paramsBySource, volumeSources, nil
}

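// setVolumeAttachmentInfo publishes the given volume attachments to state
// and records them in the context.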
func setVolumeAttachmentInfo(ctx *context, volumeAttachments []storage.VolumeAttachment) error {
	if len(volumeAttachments) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volume attachments in the
	// provider, by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing volume attachments that we
	// fail to record in state.
	errorResults, err := ctx.volumeAccessor.SetVolumeAttachmentInfo(
		volumeAttachmentsFromStorage(volumeAttachments),
	)
	if err != nil {
		return errors.Annotate(err, "publishing volume attachments to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			return errors.Annotatef(
				result.Error, "publishing attachment of %s to %s to state",
				names.ReadableString(volumeAttachments[i].Volume),
				names.ReadableString(volumeAttachments[i].Machine),
			)
		}
		// Record the volume attachment in the context.
		id := params.MachineStorageId{
			MachineTag:    volumeAttachments[i].Machine.String(),
			AttachmentTag: volumeAttachments[i].Volume.String(),
		}
		ctx.volumeAttachments[id] = volumeAttachments[i]
		removePendingVolumeAttachment(ctx, id)
	}
	return nil
}

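// The operation types below pair an operation's arguments with
// exponentialBackoff retry state; each key method returns the identity
// under which the operation is scheduled.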
type createVolumeOp struct {
	exponentialBackoff
	args storage.VolumeParams
}

func (op *createVolumeOp) key() interface{} {
	return op.args.Tag
}

type destroyVolumeOp struct {
	exponentialBackoff
	tag names.VolumeTag
}

func (op *destroyVolumeOp) key() interface{} {
	return op.tag
}

type attachVolumeOp struct {
	exponentialBackoff
	args storage.VolumeAttachmentParams
}

func (op *attachVolumeOp) key() interface{} {
	return params.MachineStorageId{
		MachineTag:    op.args.Machine.String(),
		AttachmentTag: op.args.Volume.String(),
	}
}

type detachVolumeOp struct {
	exponentialBackoff
	args storage.VolumeAttachmentParams
}

func (op *detachVolumeOp) key() interface{} {
	return params.MachineStorageId{
		MachineTag:    op.args.Machine.String(),
		AttachmentTag: op.args.Volume.String(),
	}
}