github.com/axw/juju@v0.0.0-20161005053422-4bd6544d08d4/worker/storageprovisioner/volume_ops.go (about)

     1  // Copyright 2015 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package storageprovisioner
     5  
     6  import (
     7  	"github.com/juju/errors"
     8  	"gopkg.in/juju/names.v2"
     9  
    10  	"github.com/juju/juju/apiserver/params"
    11  	"github.com/juju/juju/status"
    12  	"github.com/juju/juju/storage"
    13  )
    14  
// createVolumes creates volumes with the specified parameters.
//
// Volumes that fail to be created are rescheduled for retry rather than
// aborting the whole pass; per-entity outcomes are reported to state as
// a single batch of statuses.
func createVolumes(ctx *context, ops map[names.VolumeTag]*createVolumeOp) error {
	// Collect the creation parameters from the pending operations.
	volumeParams := make([]storage.VolumeParams, 0, len(ops))
	for _, op := range ops {
		volumeParams = append(volumeParams, op.args)
	}
	// Group the parameters by volume source so that each source's
	// volumes can be created in a single call.
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.config.StorageDir, volumeParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumes []storage.Volume
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeParams := range paramsBySource {
		logger.Debugf("creating volumes: %v", volumeParams)
		volumeSource := volumeSources[sourceName]
		validVolumeParams, validationErrors := validateVolumeParams(volumeSource, volumeParams)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			// Invalid parameters are not retried: report an error
			// status and drop the volume from this pass.
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: status.Error.String(),
				Info:   err.Error(),
			})
			logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(volumeParams[i].Tag), err,
			)
		}
		volumeParams = validVolumeParams
		if len(volumeParams) == 0 {
			continue
		}
		results, err := volumeSource.CreateVolumes(volumeParams)
		if err != nil {
			return errors.Annotatef(err, "creating volumes from source %q", sourceName)
		}
		for i, result := range results {
			// Append an optimistic "attaching" status; entityStatus
			// aliases the appended element so the branches below can
			// amend it in place.
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: status.Attaching.String(),
			})
			entityStatus := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume creation.
				reschedule = append(reschedule, ops[volumeParams[i].Tag])

				// Note: we keep the status as "pending" to indicate
				// that we will retry. When we distinguish between
				// transient and permanent errors, we will set the
				// status to "error" for permanent errors.
				entityStatus.Status = status.Pending.String()
				entityStatus.Info = result.Error.Error()
				logger.Debugf(
					"failed to create %s: %v",
					names.ReadableString(volumeParams[i].Tag),
					result.Error,
				)
				continue
			}
			volumes = append(volumes, *result.Volume)
			if result.VolumeAttachment != nil {
				// The source attached the volume at creation time.
				entityStatus.Status = status.Attached.String()
				volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
			}
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if len(volumes) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volumes in the provider,
	// by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing volumes that we fail
	// to record in state.
	errorResults, err := ctx.config.Volumes.SetVolumeInfo(volumesFromStorage(volumes))
	if err != nil {
		return errors.Annotate(err, "publishing volumes to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			// A failure to publish one volume is logged but does not
			// abort publishing of the remaining volumes.
			logger.Errorf(
				"publishing volume %s to state: %v",
				volumes[i].Tag.Id(),
				result.Error,
			)
		}
	}
	for _, v := range volumes {
		updateVolume(ctx, v)
	}
	// Note: the storage provisioner that creates a volume is also
	// responsible for creating the volume attachment. It is therefore
	// safe to set the volume attachment info after the volume info,
	// without leading to the possibility of concurrent, duplicate
	// attachments.
	err = setVolumeAttachmentInfo(ctx, volumeAttachments)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
   123  
   124  // attachVolumes creates volume attachments with the specified parameters.
   125  func attachVolumes(ctx *context, ops map[params.MachineStorageId]*attachVolumeOp) error {
   126  	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
   127  	for _, op := range ops {
   128  		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
   129  	}
   130  	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
   131  		ctx.config.StorageDir, volumeAttachmentParams, ctx.config.Registry,
   132  	)
   133  	if err != nil {
   134  		return errors.Trace(err)
   135  	}
   136  	var reschedule []scheduleOp
   137  	var volumeAttachments []storage.VolumeAttachment
   138  	var statuses []params.EntityStatusArgs
   139  	for sourceName, volumeAttachmentParams := range paramsBySource {
   140  		logger.Debugf("attaching volumes: %+v", volumeAttachmentParams)
   141  		volumeSource := volumeSources[sourceName]
   142  		results, err := volumeSource.AttachVolumes(volumeAttachmentParams)
   143  		if err != nil {
   144  			return errors.Annotatef(err, "attaching volumes from source %q", sourceName)
   145  		}
   146  		for i, result := range results {
   147  			p := volumeAttachmentParams[i]
   148  			statuses = append(statuses, params.EntityStatusArgs{
   149  				Tag:    p.Volume.String(),
   150  				Status: status.Attached.String(),
   151  			})
   152  			entityStatus := &statuses[len(statuses)-1]
   153  			if result.Error != nil {
   154  				// Reschedule the volume attachment.
   155  				id := params.MachineStorageId{
   156  					MachineTag:    p.Machine.String(),
   157  					AttachmentTag: p.Volume.String(),
   158  				}
   159  				reschedule = append(reschedule, ops[id])
   160  
   161  				// Note: we keep the status as "attaching" to
   162  				// indicate that we will retry. When we distinguish
   163  				// between transient and permanent errors, we will
   164  				// set the status to "error" for permanent errors.
   165  				entityStatus.Status = status.Attaching.String()
   166  				entityStatus.Info = result.Error.Error()
   167  				logger.Debugf(
   168  					"failed to attach %s to %s: %v",
   169  					names.ReadableString(p.Volume),
   170  					names.ReadableString(p.Machine),
   171  					result.Error,
   172  				)
   173  				continue
   174  			}
   175  			volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
   176  		}
   177  	}
   178  	scheduleOperations(ctx, reschedule...)
   179  	setStatus(ctx, statuses)
   180  	if err := setVolumeAttachmentInfo(ctx, volumeAttachments); err != nil {
   181  		return errors.Trace(err)
   182  	}
   183  	return nil
   184  }
   185  
// destroyVolumes destroys volumes with the specified parameters.
//
// Successfully destroyed volumes are removed from state; failed
// destructions are rescheduled for retry with a "destroying" status.
func destroyVolumes(ctx *context, ops map[names.VolumeTag]*destroyVolumeOp) error {
	tags := make([]names.VolumeTag, 0, len(ops))
	for tag := range ops {
		tags = append(tags, tag)
	}
	// Resolve parameters for the tags, then group them by volume
	// source so that each source is invoked once.
	volumeParams, err := volumeParams(ctx, tags)
	if err != nil {
		return errors.Trace(err)
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.config.StorageDir, volumeParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var remove []names.Tag
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	for sourceName, volumeParams := range paramsBySource {
		logger.Debugf("destroying volumes from %q: %v", sourceName, volumeParams)
		volumeSource := volumeSources[sourceName]
		validVolumeParams, validationErrors := validateVolumeParams(volumeSource, volumeParams)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			// Invalid parameters are not retried: report an error
			// status and drop the volume from this pass.
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: status.Error.String(),
				Info:   err.Error(),
			})
			logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(volumeParams[i].Tag), err,
			)
		}
		volumeParams = validVolumeParams
		if len(volumeParams) == 0 {
			continue
		}
		// Translate volume tags to the provider's volume IDs, as
		// recorded in the context.
		volumeIds := make([]string, len(volumeParams))
		for i, volumeParams := range volumeParams {
			volume, ok := ctx.volumes[volumeParams.Tag]
			if !ok {
				return errors.NotFoundf("volume %s", volumeParams.Tag.Id())
			}
			volumeIds[i] = volume.VolumeId
		}
		errs, err := volumeSource.DestroyVolumes(volumeIds)
		if err != nil {
			return errors.Trace(err)
		}
		for i, err := range errs {
			tag := volumeParams[i].Tag
			if err == nil {
				// Destroyed successfully; schedule removal from state.
				remove = append(remove, tag)
				continue
			}
			// Failed to destroy volume; reschedule and update status.
			reschedule = append(reschedule, ops[tag])
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    tag.String(),
				Status: status.Destroying.String(),
				Info:   err.Error(),
			})
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing volumes from state")
	}
	return nil
}
   261  
   262  // detachVolumes destroys volume attachments with the specified parameters.
   263  func detachVolumes(ctx *context, ops map[params.MachineStorageId]*detachVolumeOp) error {
   264  	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
   265  	for _, op := range ops {
   266  		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
   267  	}
   268  	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
   269  		ctx.config.StorageDir, volumeAttachmentParams, ctx.config.Registry,
   270  	)
   271  	if err != nil {
   272  		return errors.Trace(err)
   273  	}
   274  	var reschedule []scheduleOp
   275  	var statuses []params.EntityStatusArgs
   276  	var remove []params.MachineStorageId
   277  	for sourceName, volumeAttachmentParams := range paramsBySource {
   278  		logger.Debugf("detaching volumes: %+v", volumeAttachmentParams)
   279  		volumeSource := volumeSources[sourceName]
   280  		errs, err := volumeSource.DetachVolumes(volumeAttachmentParams)
   281  		if err != nil {
   282  			return errors.Annotatef(err, "detaching volumes from source %q", sourceName)
   283  		}
   284  		for i, err := range errs {
   285  			p := volumeAttachmentParams[i]
   286  			statuses = append(statuses, params.EntityStatusArgs{
   287  				Tag: p.Volume.String(),
   288  				// TODO(axw) when we support multiple
   289  				// attachment, we'll have to check if
   290  				// there are any other attachments
   291  				// before saying the status "detached".
   292  				Status: status.Detached.String(),
   293  			})
   294  			id := params.MachineStorageId{
   295  				MachineTag:    p.Machine.String(),
   296  				AttachmentTag: p.Volume.String(),
   297  			}
   298  			entityStatus := &statuses[len(statuses)-1]
   299  			if err != nil {
   300  				reschedule = append(reschedule, ops[id])
   301  				entityStatus.Status = status.Detaching.String()
   302  				entityStatus.Info = err.Error()
   303  				logger.Debugf(
   304  					"failed to detach %s from %s: %v",
   305  					names.ReadableString(p.Volume),
   306  					names.ReadableString(p.Machine),
   307  					err,
   308  				)
   309  				continue
   310  			}
   311  			remove = append(remove, id)
   312  		}
   313  	}
   314  	scheduleOperations(ctx, reschedule...)
   315  	setStatus(ctx, statuses)
   316  	if err := removeAttachments(ctx, remove); err != nil {
   317  		return errors.Annotate(err, "removing attachments from state")
   318  	}
   319  	for _, id := range remove {
   320  		delete(ctx.volumeAttachments, id)
   321  	}
   322  	return nil
   323  }
   324  
   325  // volumeParamsBySource separates the volume parameters by volume source.
   326  func volumeParamsBySource(
   327  	baseStorageDir string,
   328  	params []storage.VolumeParams,
   329  	registry storage.ProviderRegistry,
   330  ) (map[string][]storage.VolumeParams, map[string]storage.VolumeSource, error) {
   331  	// TODO(axw) later we may have multiple instantiations (sources)
   332  	// for a storage provider, e.g. multiple Ceph installations. For
   333  	// now we assume a single source for each provider type, with no
   334  	// configuration.
   335  	volumeSources := make(map[string]storage.VolumeSource)
   336  	for _, params := range params {
   337  		sourceName := string(params.Provider)
   338  		if _, ok := volumeSources[sourceName]; ok {
   339  			continue
   340  		}
   341  		volumeSource, err := volumeSource(
   342  			baseStorageDir, sourceName, params.Provider, registry,
   343  		)
   344  		if errors.Cause(err) == errNonDynamic {
   345  			volumeSource = nil
   346  		} else if err != nil {
   347  			return nil, nil, errors.Annotate(err, "getting volume source")
   348  		}
   349  		volumeSources[sourceName] = volumeSource
   350  	}
   351  	paramsBySource := make(map[string][]storage.VolumeParams)
   352  	for _, params := range params {
   353  		sourceName := string(params.Provider)
   354  		volumeSource := volumeSources[sourceName]
   355  		if volumeSource == nil {
   356  			// Ignore nil volume sources; this means that the
   357  			// volume should be created by the machine-provisioner.
   358  			continue
   359  		}
   360  		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
   361  	}
   362  	return paramsBySource, volumeSources, nil
   363  }
   364  
   365  // validateVolumeParams validates a collection of volume parameters.
   366  func validateVolumeParams(
   367  	volumeSource storage.VolumeSource, volumeParams []storage.VolumeParams,
   368  ) ([]storage.VolumeParams, []error) {
   369  	valid := make([]storage.VolumeParams, 0, len(volumeParams))
   370  	results := make([]error, len(volumeParams))
   371  	for i, params := range volumeParams {
   372  		err := volumeSource.ValidateVolumeParams(params)
   373  		if err == nil {
   374  			valid = append(valid, params)
   375  		}
   376  		results[i] = err
   377  	}
   378  	return valid, results
   379  }
   380  
   381  // volumeAttachmentParamsBySource separates the volume attachment parameters by volume source.
   382  func volumeAttachmentParamsBySource(
   383  	baseStorageDir string,
   384  	params []storage.VolumeAttachmentParams,
   385  	registry storage.ProviderRegistry,
   386  ) (map[string][]storage.VolumeAttachmentParams, map[string]storage.VolumeSource, error) {
   387  	// TODO(axw) later we may have multiple instantiations (sources)
   388  	// for a storage provider, e.g. multiple Ceph installations. For
   389  	// now we assume a single source for each provider type, with no
   390  	// configuration.
   391  	volumeSources := make(map[string]storage.VolumeSource)
   392  	paramsBySource := make(map[string][]storage.VolumeAttachmentParams)
   393  	for _, params := range params {
   394  		sourceName := string(params.Provider)
   395  		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
   396  		if _, ok := volumeSources[sourceName]; ok {
   397  			continue
   398  		}
   399  		volumeSource, err := volumeSource(
   400  			baseStorageDir, sourceName, params.Provider, registry,
   401  		)
   402  		if err != nil {
   403  			return nil, nil, errors.Annotate(err, "getting volume source")
   404  		}
   405  		volumeSources[sourceName] = volumeSource
   406  	}
   407  	return paramsBySource, volumeSources, nil
   408  }
   409  
   410  func setVolumeAttachmentInfo(ctx *context, volumeAttachments []storage.VolumeAttachment) error {
   411  	if len(volumeAttachments) == 0 {
   412  		return nil
   413  	}
   414  	// TODO(axw) we need to be able to list volume attachments in the
   415  	// provider, by environment, so that we can "harvest" them if they're
   416  	// unknown. This will take care of killing volumes that we fail to
   417  	// record in state.
   418  	errorResults, err := ctx.config.Volumes.SetVolumeAttachmentInfo(
   419  		volumeAttachmentsFromStorage(volumeAttachments),
   420  	)
   421  	if err != nil {
   422  		return errors.Annotate(err, "publishing volumes to state")
   423  	}
   424  	for i, result := range errorResults {
   425  		if result.Error != nil {
   426  			return errors.Annotatef(
   427  				result.Error, "publishing attachment of %s to %s to state",
   428  				names.ReadableString(volumeAttachments[i].Volume),
   429  				names.ReadableString(volumeAttachments[i].Machine),
   430  			)
   431  		}
   432  		// Record the volume attachment in the context.
   433  		id := params.MachineStorageId{
   434  			MachineTag:    volumeAttachments[i].Machine.String(),
   435  			AttachmentTag: volumeAttachments[i].Volume.String(),
   436  		}
   437  		ctx.volumeAttachments[id] = volumeAttachments[i]
   438  		removePendingVolumeAttachment(ctx, id)
   439  	}
   440  	return nil
   441  }
   442  
// createVolumeOp is a schedulable operation for creating a volume
// with the given parameters, retried with exponential backoff.
type createVolumeOp struct {
	exponentialBackoff
	args storage.VolumeParams
}

// key returns the volume tag that identifies this operation.
func (op *createVolumeOp) key() interface{} {
	return op.args.Tag
}
   451  
// destroyVolumeOp is a schedulable operation for destroying the volume
// with the given tag, retried with exponential backoff.
type destroyVolumeOp struct {
	exponentialBackoff
	tag names.VolumeTag
}

// key returns the volume tag that identifies this operation.
func (op *destroyVolumeOp) key() interface{} {
	return op.tag
}
   460  
   461  type attachVolumeOp struct {
   462  	exponentialBackoff
   463  	args storage.VolumeAttachmentParams
   464  }
   465  
   466  func (op *attachVolumeOp) key() interface{} {
   467  	return params.MachineStorageId{
   468  		MachineTag:    op.args.Machine.String(),
   469  		AttachmentTag: op.args.Volume.String(),
   470  	}
   471  }
   472  
   473  type detachVolumeOp struct {
   474  	exponentialBackoff
   475  	args storage.VolumeAttachmentParams
   476  }
   477  
   478  func (op *detachVolumeOp) key() interface{} {
   479  	return params.MachineStorageId{
   480  		MachineTag:    op.args.Machine.String(),
   481  		AttachmentTag: op.args.Volume.String(),
   482  	}
   483  }