github.com/makyo/juju@v0.0.0-20160425123129-2608902037e9/worker/storageprovisioner/volume_ops.go (about)

     1  // Copyright 2015 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package storageprovisioner
     5  
     6  import (
     7  	"github.com/juju/errors"
     8  	"github.com/juju/names"
     9  
    10  	"github.com/juju/juju/apiserver/params"
    11  	"github.com/juju/juju/environs/config"
    12  	"github.com/juju/juju/status"
    13  	"github.com/juju/juju/storage"
    14  )
    15  
    16  // createVolumes creates volumes with the specified parameters.
    17  func createVolumes(ctx *context, ops map[names.VolumeTag]*createVolumeOp) error {
    18  	volumeParams := make([]storage.VolumeParams, 0, len(ops))
    19  	for _, op := range ops {
    20  		volumeParams = append(volumeParams, op.args)
    21  	}
    22  	paramsBySource, volumeSources, err := volumeParamsBySource(
    23  		ctx.modelConfig, ctx.config.StorageDir, volumeParams,
    24  	)
    25  	if err != nil {
    26  		return errors.Trace(err)
    27  	}
    28  	var reschedule []scheduleOp
    29  	var volumes []storage.Volume
    30  	var volumeAttachments []storage.VolumeAttachment
    31  	var statuses []params.EntityStatusArgs
    32  	for sourceName, volumeParams := range paramsBySource {
    33  		logger.Debugf("creating volumes: %v", volumeParams)
    34  		volumeSource := volumeSources[sourceName]
    35  		validVolumeParams, validationErrors := validateVolumeParams(volumeSource, volumeParams)
    36  		for i, err := range validationErrors {
    37  			if err == nil {
    38  				continue
    39  			}
    40  			statuses = append(statuses, params.EntityStatusArgs{
    41  				Tag:    volumeParams[i].Tag.String(),
    42  				Status: status.StatusError,
    43  				Info:   err.Error(),
    44  			})
    45  			logger.Debugf(
    46  				"failed to validate parameters for %s: %v",
    47  				names.ReadableString(volumeParams[i].Tag), err,
    48  			)
    49  		}
    50  		volumeParams = validVolumeParams
    51  		if len(volumeParams) == 0 {
    52  			continue
    53  		}
    54  		results, err := volumeSource.CreateVolumes(volumeParams)
    55  		if err != nil {
    56  			return errors.Annotatef(err, "creating volumes from source %q", sourceName)
    57  		}
    58  		for i, result := range results {
    59  			statuses = append(statuses, params.EntityStatusArgs{
    60  				Tag:    volumeParams[i].Tag.String(),
    61  				Status: status.StatusAttaching,
    62  			})
    63  			entityStatus := &statuses[len(statuses)-1]
    64  			if result.Error != nil {
    65  				// Reschedule the volume creation.
    66  				reschedule = append(reschedule, ops[volumeParams[i].Tag])
    67  
    68  				// Note: we keep the status as "pending" to indicate
    69  				// that we will retry. When we distinguish between
    70  				// transient and permanent errors, we will set the
    71  				// status to "error" for permanent errors.
    72  				entityStatus.Status = status.StatusPending
    73  				entityStatus.Info = result.Error.Error()
    74  				logger.Debugf(
    75  					"failed to create %s: %v",
    76  					names.ReadableString(volumeParams[i].Tag),
    77  					result.Error,
    78  				)
    79  				continue
    80  			}
    81  			volumes = append(volumes, *result.Volume)
    82  			if result.VolumeAttachment != nil {
    83  				entityStatus.Status = status.StatusAttached
    84  				volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
    85  			}
    86  		}
    87  	}
    88  	scheduleOperations(ctx, reschedule...)
    89  	setStatus(ctx, statuses)
    90  	if len(volumes) == 0 {
    91  		return nil
    92  	}
    93  	// TODO(axw) we need to be able to list volumes in the provider,
    94  	// by environment, so that we can "harvest" them if they're
    95  	// unknown. This will take care of killing volumes that we fail
    96  	// to record in state.
    97  	errorResults, err := ctx.config.Volumes.SetVolumeInfo(volumesFromStorage(volumes))
    98  	if err != nil {
    99  		return errors.Annotate(err, "publishing volumes to state")
   100  	}
   101  	for i, result := range errorResults {
   102  		if result.Error != nil {
   103  			logger.Errorf(
   104  				"publishing volume %s to state: %v",
   105  				volumes[i].Tag.Id(),
   106  				result.Error,
   107  			)
   108  		}
   109  	}
   110  	for _, v := range volumes {
   111  		updateVolume(ctx, v)
   112  	}
   113  	// Note: the storage provisioner that creates a volume is also
   114  	// responsible for creating the volume attachment. It is therefore
   115  	// safe to set the volume attachment info after the volume info,
   116  	// without leading to the possibility of concurrent, duplicate
   117  	// attachments.
   118  	err = setVolumeAttachmentInfo(ctx, volumeAttachments)
   119  	if err != nil {
   120  		return errors.Trace(err)
   121  	}
   122  	return nil
   123  }
   124  
   125  // attachVolumes creates volume attachments with the specified parameters.
   126  func attachVolumes(ctx *context, ops map[params.MachineStorageId]*attachVolumeOp) error {
   127  	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
   128  	for _, op := range ops {
   129  		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
   130  	}
   131  	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
   132  		ctx.modelConfig, ctx.config.StorageDir, volumeAttachmentParams,
   133  	)
   134  	if err != nil {
   135  		return errors.Trace(err)
   136  	}
   137  	var reschedule []scheduleOp
   138  	var volumeAttachments []storage.VolumeAttachment
   139  	var statuses []params.EntityStatusArgs
   140  	for sourceName, volumeAttachmentParams := range paramsBySource {
   141  		logger.Debugf("attaching volumes: %+v", volumeAttachmentParams)
   142  		volumeSource := volumeSources[sourceName]
   143  		results, err := volumeSource.AttachVolumes(volumeAttachmentParams)
   144  		if err != nil {
   145  			return errors.Annotatef(err, "attaching volumes from source %q", sourceName)
   146  		}
   147  		for i, result := range results {
   148  			p := volumeAttachmentParams[i]
   149  			statuses = append(statuses, params.EntityStatusArgs{
   150  				Tag:    p.Volume.String(),
   151  				Status: status.StatusAttached,
   152  			})
   153  			entityStatus := &statuses[len(statuses)-1]
   154  			if result.Error != nil {
   155  				// Reschedule the volume attachment.
   156  				id := params.MachineStorageId{
   157  					MachineTag:    p.Machine.String(),
   158  					AttachmentTag: p.Volume.String(),
   159  				}
   160  				reschedule = append(reschedule, ops[id])
   161  
   162  				// Note: we keep the status as "attaching" to
   163  				// indicate that we will retry. When we distinguish
   164  				// between transient and permanent errors, we will
   165  				// set the status to "error" for permanent errors.
   166  				entityStatus.Status = status.StatusAttaching
   167  				entityStatus.Info = result.Error.Error()
   168  				logger.Debugf(
   169  					"failed to attach %s to %s: %v",
   170  					names.ReadableString(p.Volume),
   171  					names.ReadableString(p.Machine),
   172  					result.Error,
   173  				)
   174  				continue
   175  			}
   176  			volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
   177  		}
   178  	}
   179  	scheduleOperations(ctx, reschedule...)
   180  	setStatus(ctx, statuses)
   181  	if err := setVolumeAttachmentInfo(ctx, volumeAttachments); err != nil {
   182  		return errors.Trace(err)
   183  	}
   184  	return nil
   185  }
   186  
   187  // destroyVolumes destroys volumes with the specified parameters.
   188  func destroyVolumes(ctx *context, ops map[names.VolumeTag]*destroyVolumeOp) error {
   189  	tags := make([]names.VolumeTag, 0, len(ops))
   190  	for tag := range ops {
   191  		tags = append(tags, tag)
   192  	}
   193  	volumeParams, err := volumeParams(ctx, tags)
   194  	if err != nil {
   195  		return errors.Trace(err)
   196  	}
   197  	paramsBySource, volumeSources, err := volumeParamsBySource(
   198  		ctx.modelConfig, ctx.config.StorageDir, volumeParams,
   199  	)
   200  	if err != nil {
   201  		return errors.Trace(err)
   202  	}
   203  	var remove []names.Tag
   204  	var reschedule []scheduleOp
   205  	var statuses []params.EntityStatusArgs
   206  	for sourceName, volumeParams := range paramsBySource {
   207  		logger.Debugf("destroying volumes from %q: %v", sourceName, volumeParams)
   208  		volumeSource := volumeSources[sourceName]
   209  		validVolumeParams, validationErrors := validateVolumeParams(volumeSource, volumeParams)
   210  		for i, err := range validationErrors {
   211  			if err == nil {
   212  				continue
   213  			}
   214  			statuses = append(statuses, params.EntityStatusArgs{
   215  				Tag:    volumeParams[i].Tag.String(),
   216  				Status: status.StatusError,
   217  				Info:   err.Error(),
   218  			})
   219  			logger.Debugf(
   220  				"failed to validate parameters for %s: %v",
   221  				names.ReadableString(volumeParams[i].Tag), err,
   222  			)
   223  		}
   224  		volumeParams = validVolumeParams
   225  		if len(volumeParams) == 0 {
   226  			continue
   227  		}
   228  		volumeIds := make([]string, len(volumeParams))
   229  		for i, volumeParams := range volumeParams {
   230  			volume, ok := ctx.volumes[volumeParams.Tag]
   231  			if !ok {
   232  				return errors.NotFoundf("volume %s", volumeParams.Tag.Id())
   233  			}
   234  			volumeIds[i] = volume.VolumeId
   235  		}
   236  		errs, err := volumeSource.DestroyVolumes(volumeIds)
   237  		if err != nil {
   238  			return errors.Trace(err)
   239  		}
   240  		for i, err := range errs {
   241  			tag := volumeParams[i].Tag
   242  			if err == nil {
   243  				remove = append(remove, tag)
   244  				continue
   245  			}
   246  			// Failed to destroy volume; reschedule and update status.
   247  			reschedule = append(reschedule, ops[tag])
   248  			statuses = append(statuses, params.EntityStatusArgs{
   249  				Tag:    tag.String(),
   250  				Status: status.StatusDestroying,
   251  				Info:   err.Error(),
   252  			})
   253  		}
   254  	}
   255  	scheduleOperations(ctx, reschedule...)
   256  	setStatus(ctx, statuses)
   257  	if err := removeEntities(ctx, remove); err != nil {
   258  		return errors.Annotate(err, "removing volumes from state")
   259  	}
   260  	return nil
   261  }
   262  
   263  // detachVolumes destroys volume attachments with the specified parameters.
   264  func detachVolumes(ctx *context, ops map[params.MachineStorageId]*detachVolumeOp) error {
   265  	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
   266  	for _, op := range ops {
   267  		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
   268  	}
   269  	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
   270  		ctx.modelConfig, ctx.config.StorageDir, volumeAttachmentParams,
   271  	)
   272  	if err != nil {
   273  		return errors.Trace(err)
   274  	}
   275  	var reschedule []scheduleOp
   276  	var statuses []params.EntityStatusArgs
   277  	var remove []params.MachineStorageId
   278  	for sourceName, volumeAttachmentParams := range paramsBySource {
   279  		logger.Debugf("detaching volumes: %+v", volumeAttachmentParams)
   280  		volumeSource := volumeSources[sourceName]
   281  		errs, err := volumeSource.DetachVolumes(volumeAttachmentParams)
   282  		if err != nil {
   283  			return errors.Annotatef(err, "detaching volumes from source %q", sourceName)
   284  		}
   285  		for i, err := range errs {
   286  			p := volumeAttachmentParams[i]
   287  			statuses = append(statuses, params.EntityStatusArgs{
   288  				Tag: p.Volume.String(),
   289  				// TODO(axw) when we support multiple
   290  				// attachment, we'll have to check if
   291  				// there are any other attachments
   292  				// before saying the status "detached".
   293  				Status: status.StatusDetached,
   294  			})
   295  			id := params.MachineStorageId{
   296  				MachineTag:    p.Machine.String(),
   297  				AttachmentTag: p.Volume.String(),
   298  			}
   299  			entityStatus := &statuses[len(statuses)-1]
   300  			if err != nil {
   301  				reschedule = append(reschedule, ops[id])
   302  				entityStatus.Status = status.StatusDetaching
   303  				entityStatus.Info = err.Error()
   304  				logger.Debugf(
   305  					"failed to detach %s from %s: %v",
   306  					names.ReadableString(p.Volume),
   307  					names.ReadableString(p.Machine),
   308  					err,
   309  				)
   310  				continue
   311  			}
   312  			remove = append(remove, id)
   313  		}
   314  	}
   315  	scheduleOperations(ctx, reschedule...)
   316  	setStatus(ctx, statuses)
   317  	if err := removeAttachments(ctx, remove); err != nil {
   318  		return errors.Annotate(err, "removing attachments from state")
   319  	}
   320  	for _, id := range remove {
   321  		delete(ctx.volumeAttachments, id)
   322  	}
   323  	return nil
   324  }
   325  
   326  // volumeParamsBySource separates the volume parameters by volume source.
   327  func volumeParamsBySource(
   328  	environConfig *config.Config,
   329  	baseStorageDir string,
   330  	params []storage.VolumeParams,
   331  ) (map[string][]storage.VolumeParams, map[string]storage.VolumeSource, error) {
   332  	// TODO(axw) later we may have multiple instantiations (sources)
   333  	// for a storage provider, e.g. multiple Ceph installations. For
   334  	// now we assume a single source for each provider type, with no
   335  	// configuration.
   336  	volumeSources := make(map[string]storage.VolumeSource)
   337  	for _, params := range params {
   338  		sourceName := string(params.Provider)
   339  		if _, ok := volumeSources[sourceName]; ok {
   340  			continue
   341  		}
   342  		volumeSource, err := volumeSource(
   343  			environConfig, baseStorageDir, sourceName, params.Provider,
   344  		)
   345  		if errors.Cause(err) == errNonDynamic {
   346  			volumeSource = nil
   347  		} else if err != nil {
   348  			return nil, nil, errors.Annotate(err, "getting volume source")
   349  		}
   350  		volumeSources[sourceName] = volumeSource
   351  	}
   352  	paramsBySource := make(map[string][]storage.VolumeParams)
   353  	for _, params := range params {
   354  		sourceName := string(params.Provider)
   355  		volumeSource := volumeSources[sourceName]
   356  		if volumeSource == nil {
   357  			// Ignore nil volume sources; this means that the
   358  			// volume should be created by the machine-provisioner.
   359  			continue
   360  		}
   361  		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
   362  	}
   363  	return paramsBySource, volumeSources, nil
   364  }
   365  
   366  // validateVolumeParams validates a collection of volume parameters.
   367  func validateVolumeParams(
   368  	volumeSource storage.VolumeSource, volumeParams []storage.VolumeParams,
   369  ) ([]storage.VolumeParams, []error) {
   370  	valid := make([]storage.VolumeParams, 0, len(volumeParams))
   371  	results := make([]error, len(volumeParams))
   372  	for i, params := range volumeParams {
   373  		err := volumeSource.ValidateVolumeParams(params)
   374  		if err == nil {
   375  			valid = append(valid, params)
   376  		}
   377  		results[i] = err
   378  	}
   379  	return valid, results
   380  }
   381  
   382  // volumeAttachmentParamsBySource separates the volume attachment parameters by volume source.
   383  func volumeAttachmentParamsBySource(
   384  	environConfig *config.Config,
   385  	baseStorageDir string,
   386  	params []storage.VolumeAttachmentParams,
   387  ) (map[string][]storage.VolumeAttachmentParams, map[string]storage.VolumeSource, error) {
   388  	// TODO(axw) later we may have multiple instantiations (sources)
   389  	// for a storage provider, e.g. multiple Ceph installations. For
   390  	// now we assume a single source for each provider type, with no
   391  	// configuration.
   392  	volumeSources := make(map[string]storage.VolumeSource)
   393  	paramsBySource := make(map[string][]storage.VolumeAttachmentParams)
   394  	for _, params := range params {
   395  		sourceName := string(params.Provider)
   396  		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
   397  		if _, ok := volumeSources[sourceName]; ok {
   398  			continue
   399  		}
   400  		volumeSource, err := volumeSource(
   401  			environConfig, baseStorageDir, sourceName, params.Provider,
   402  		)
   403  		if err != nil {
   404  			return nil, nil, errors.Annotate(err, "getting volume source")
   405  		}
   406  		volumeSources[sourceName] = volumeSource
   407  	}
   408  	return paramsBySource, volumeSources, nil
   409  }
   410  
   411  func setVolumeAttachmentInfo(ctx *context, volumeAttachments []storage.VolumeAttachment) error {
   412  	if len(volumeAttachments) == 0 {
   413  		return nil
   414  	}
   415  	// TODO(axw) we need to be able to list volume attachments in the
   416  	// provider, by environment, so that we can "harvest" them if they're
   417  	// unknown. This will take care of killing volumes that we fail to
   418  	// record in state.
   419  	errorResults, err := ctx.config.Volumes.SetVolumeAttachmentInfo(
   420  		volumeAttachmentsFromStorage(volumeAttachments),
   421  	)
   422  	if err != nil {
   423  		return errors.Annotate(err, "publishing volumes to state")
   424  	}
   425  	for i, result := range errorResults {
   426  		if result.Error != nil {
   427  			return errors.Annotatef(
   428  				result.Error, "publishing attachment of %s to %s to state",
   429  				names.ReadableString(volumeAttachments[i].Volume),
   430  				names.ReadableString(volumeAttachments[i].Machine),
   431  			)
   432  		}
   433  		// Record the volume attachment in the context.
   434  		id := params.MachineStorageId{
   435  			MachineTag:    volumeAttachments[i].Machine.String(),
   436  			AttachmentTag: volumeAttachments[i].Volume.String(),
   437  		}
   438  		ctx.volumeAttachments[id] = volumeAttachments[i]
   439  		removePendingVolumeAttachment(ctx, id)
   440  	}
   441  	return nil
   442  }
   443  
// createVolumeOp is a schedulable operation to create a volume with the
// given parameters. It embeds exponentialBackoff, used when the
// operation is rescheduled after a failure.
type createVolumeOp struct {
	exponentialBackoff
	args storage.VolumeParams
}

// key returns the volume tag that identifies this operation for
// scheduling purposes.
func (op *createVolumeOp) key() interface{} {
	return op.args.Tag
}
   452  
// destroyVolumeOp is a schedulable operation to destroy the volume with
// the given tag. It embeds exponentialBackoff, used when the operation
// is rescheduled after a failure.
type destroyVolumeOp struct {
	exponentialBackoff
	tag names.VolumeTag
}

// key returns the volume tag that identifies this operation for
// scheduling purposes.
func (op *destroyVolumeOp) key() interface{} {
	return op.tag
}
   461  
// attachVolumeOp is a schedulable operation to attach a volume with the
// given parameters. It embeds exponentialBackoff, used when the
// operation is rescheduled after a failure.
type attachVolumeOp struct {
	exponentialBackoff
	args storage.VolumeAttachmentParams
}

// key returns the machine/volume id pair that identifies this
// attachment operation for scheduling purposes.
func (op *attachVolumeOp) key() interface{} {
	return params.MachineStorageId{
		MachineTag:    op.args.Machine.String(),
		AttachmentTag: op.args.Volume.String(),
	}
}
   473  
// detachVolumeOp is a schedulable operation to detach a volume with the
// given parameters. It embeds exponentialBackoff, used when the
// operation is rescheduled after a failure.
type detachVolumeOp struct {
	exponentialBackoff
	args storage.VolumeAttachmentParams
}

// key returns the machine/volume id pair that identifies this
// detachment operation for scheduling purposes.
func (op *detachVolumeOp) key() interface{} {
	return params.MachineStorageId{
		MachineTag:    op.args.Machine.String(),
		AttachmentTag: op.args.Volume.String(),
	}
}