github.com/niedbalski/juju@v0.0.0-20190215020005-8ff100488e47/worker/storageprovisioner/volume_ops.go

// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package storageprovisioner

import (
	"github.com/juju/errors"
	"gopkg.in/juju/names.v2"

	"github.com/juju/juju/apiserver/params"
	"github.com/juju/juju/core/status"
	environscontext "github.com/juju/juju/environs/context"
	"github.com/juju/juju/storage"
)

// createVolumes creates volumes with the specified parameters.
func createVolumes(ctx *context, ops map[names.VolumeTag]*createVolumeOp) error {
	volumeParams := make([]storage.VolumeParams, 0, len(ops))
	for _, op := range ops {
		volumeParams = append(volumeParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.config.StorageDir, volumeParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumes []storage.Volume
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeParams := range paramsBySource {
		logger.Debugf("creating volumes: %v", volumeParams)
		volumeSource := volumeSources[sourceName]
		validVolumeParams, validationErrors := validateVolumeParams(volumeSource, volumeParams)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: status.Error.String(),
				Info:   err.Error(),
			})
			logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(volumeParams[i].Tag), err,
			)
		}
		volumeParams = validVolumeParams
		if len(volumeParams) == 0 {
			continue
		}
		results, err := volumeSource.CreateVolumes(ctx.config.CloudCallContext, volumeParams)
		if err != nil {
			return errors.Annotatef(err, "creating volumes from source %q", sourceName)
		}
		for i, result := range results {
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: status.Attaching.String(),
			})
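			// Take a pointer to the status just appended so that the
			// creation result below can amend it in place.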
			entityStatus := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume creation.
				reschedule = append(reschedule, ops[volumeParams[i].Tag])

				// Note: we keep the status as "pending" to indicate
				// that we will retry. When we distinguish between
				// transient and permanent errors, we will set the
				// status to "error" for permanent errors.
				entityStatus.Status = status.Pending.String()
				entityStatus.Info = result.Error.Error()
				logger.Debugf(
					"failed to create %s: %v",
					names.ReadableString(volumeParams[i].Tag),
					result.Error,
				)
				continue
			}
			volumes = append(volumes, *result.Volume)
			if result.VolumeAttachment != nil {
				entityStatus.Status = status.Attached.String()
				volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
			}
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if len(volumes) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volumes in the provider,
	// by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing volumes that we fail
	// to record in state.
	errorResults, err := ctx.config.Volumes.SetVolumeInfo(volumesFromStorage(volumes))
	if err != nil {
		return errors.Annotate(err, "publishing volumes to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			logger.Errorf(
				"publishing volume %s to state: %v",
				volumes[i].Tag.Id(),
				result.Error,
			)
		}
	}
	for _, v := range volumes {
		updateVolume(ctx, v)
	}
	// Note: the storage provisioner that creates a volume is also
	// responsible for creating the volume attachment. It is therefore
	// safe to set the volume attachment info after the volume info,
	// without leading to the possibility of concurrent, duplicate
	// attachments.
	err = setVolumeAttachmentInfo(ctx, volumeAttachments)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}

// attachVolumes creates volume attachments with the specified parameters.
func attachVolumes(ctx *context, ops map[params.MachineStorageId]*attachVolumeOp) error {
	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
	for _, op := range ops {
		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
		ctx.config.StorageDir, volumeAttachmentParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeAttachmentParams := range paramsBySource {
		logger.Debugf("attaching volumes: %+v", volumeAttachmentParams)
		volumeSource := volumeSources[sourceName]
		if volumeSource == nil {
			// The storage provider does not support dynamic
			// storage; there's nothing for the provisioner
			// to do here.
			continue
		}
		results, err := volumeSource.AttachVolumes(ctx.config.CloudCallContext, volumeAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "attaching volumes from source %q", sourceName)
		}

		for i, result := range results {
			p := volumeAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    p.Volume.String(),
				Status: status.Attached.String(),
			})
			entityStatus := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume attachment.
				id := params.MachineStorageId{
					MachineTag:    p.Machine.String(),
					AttachmentTag: p.Volume.String(),
				}
				reschedule = append(reschedule, ops[id])

				// Note: we keep the status as "attaching" to
				// indicate that we will retry. When we distinguish
				// between transient and permanent errors, we will
				// set the status to "error" for permanent errors.
				entityStatus.Status = status.Attaching.String()
				entityStatus.Info = result.Error.Error()
				logger.Debugf(
					"failed to attach %s to %s: %v",
					names.ReadableString(p.Volume),
					names.ReadableString(p.Machine),
					result.Error,
				)
				continue
			}
			volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := createVolumeAttachmentPlans(ctx, volumeAttachments); err != nil {
		return errors.Trace(err)
	}
	if err := setVolumeAttachmentInfo(ctx, volumeAttachments); err != nil {
		return errors.Trace(err)
	}

	return nil
}

// createVolumeAttachmentPlans creates volume attachment plans in state, which
// notify the machine agent of the target instance that something has been
// attached to it.
func createVolumeAttachmentPlans(ctx *context, volumeAttachments []storage.VolumeAttachment) error {
	// NOTE(gsamfira): should we merge this with setVolumeInfo?
	if len(volumeAttachments) == 0 {
		return nil
	}

	volumeAttachmentPlans := make([]params.VolumeAttachmentPlan, len(volumeAttachments))
	for i, val := range volumeAttachments {
		volumeAttachmentPlans[i] = volumeAttachmentPlanFromAttachment(val)
	}

	errorResults, err := ctx.config.Volumes.CreateVolumeAttachmentPlans(volumeAttachmentPlans)
	if err != nil {
		return errors.Annotatef(err, "creating volume plans")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			return errors.Annotatef(
				result.Error, "creating volume plan of %s to %s to state",
				names.ReadableString(volumeAttachments[i].Volume),
				names.ReadableString(volumeAttachments[i].Machine),
			)
		}
		// Record the volume attachment in the context.
		id := params.MachineStorageId{
			MachineTag:    volumeAttachmentPlans[i].MachineTag,
			AttachmentTag: volumeAttachmentPlans[i].VolumeTag,
		}
		ctx.volumeAttachments[id] = volumeAttachments[i]
		// removePendingVolumeAttachment(ctx, id)
	}
	return nil
}

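// volumeAttachmentPlanFromAttachment converts a storage.VolumeAttachment into
// a params.VolumeAttachmentPlan, defaulting the device type to DeviceTypeLocal
// when the attachment carries no plan info.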
func volumeAttachmentPlanFromAttachment(attachment storage.VolumeAttachment) params.VolumeAttachmentPlan {
	var planInfo params.VolumeAttachmentPlanInfo
	if attachment.PlanInfo != nil {
		planInfo.DeviceAttributes = attachment.PlanInfo.DeviceAttributes
		planInfo.DeviceType = attachment.PlanInfo.DeviceType
	} else {
		planInfo.DeviceType = storage.DeviceTypeLocal
	}
	return params.VolumeAttachmentPlan{
		VolumeTag:  attachment.Volume.String(),
		MachineTag: attachment.Machine.String(),
		Life:       params.Alive,
		PlanInfo:   planInfo,
	}
}

// removeVolumes destroys or releases volumes with the specified parameters.
func removeVolumes(ctx *context, ops map[names.VolumeTag]*removeVolumeOp) error {
	tags := make([]names.VolumeTag, 0, len(ops))
	for tag := range ops {
		tags = append(tags, tag)
	}
	removeVolumeParams, err := removeVolumeParams(ctx, tags)
	if err != nil {
		return errors.Trace(err)
	}
	volumeParams := make([]storage.VolumeParams, len(tags))
	removeVolumeParamsByTag := make(map[names.VolumeTag]params.RemoveVolumeParams)
	for i, args := range removeVolumeParams {
		removeVolumeParamsByTag[tags[i]] = args
		volumeParams[i] = storage.VolumeParams{
			Tag:      tags[i],
			Provider: storage.ProviderType(args.Provider),
		}
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.config.StorageDir, volumeParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var remove []names.Tag
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
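	// removeVolumes destroys or releases a batch of volume IDs using the
	// given function, recording successful removals and rescheduling any
	// failures with an error status.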
	removeVolumes := func(tags []names.VolumeTag, ids []string, f func(environscontext.ProviderCallContext, []string) ([]error, error)) error {
		if len(ids) == 0 {
			return nil
		}
		errs, err := f(ctx.config.CloudCallContext, ids)
		if err != nil {
			return errors.Trace(err)
		}
		for i, err := range errs {
			tag := tags[i]
			if err == nil {
				remove = append(remove, tag)
				continue
			}
			// Failed to destroy or release volume; reschedule and update status.
			reschedule = append(reschedule, ops[tag])
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    tag.String(),
				Status: status.Error.String(),
				Info:   errors.Annotate(err, "destroying volume").Error(),
			})
		}
		return nil
	}
	for sourceName, volumeParams := range paramsBySource {
		logger.Debugf("removing volumes from %q: %v", sourceName, volumeParams)
		volumeSource := volumeSources[sourceName]
		removeTags := make([]names.VolumeTag, len(volumeParams))
		removeParams := make([]params.RemoveVolumeParams, len(volumeParams))
		for i, args := range volumeParams {
			removeTags[i] = args.Tag
			removeParams[i] = removeVolumeParamsByTag[args.Tag]
		}
		destroyTags, destroyIds, releaseTags, releaseIds := partitionRemoveVolumeParams(removeTags, removeParams)
		if err := removeVolumes(destroyTags, destroyIds, volumeSource.DestroyVolumes); err != nil {
			return errors.Trace(err)
		}
		if err := removeVolumes(releaseTags, releaseIds, volumeSource.ReleaseVolumes); err != nil {
			return errors.Trace(err)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing volumes from state")
	}
	return nil
}

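// partitionRemoveVolumeParams splits the volumes to be removed into those to
// destroy and those to release, returning the tags and provider volume IDs
// for each group.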
func partitionRemoveVolumeParams(removeTags []names.VolumeTag, removeParams []params.RemoveVolumeParams) (
	destroyTags []names.VolumeTag, destroyIds []string,
	releaseTags []names.VolumeTag, releaseIds []string,
) {
	destroyTags = make([]names.VolumeTag, 0, len(removeParams))
	destroyIds = make([]string, 0, len(removeParams))
	releaseTags = make([]names.VolumeTag, 0, len(removeParams))
	releaseIds = make([]string, 0, len(removeParams))
	for i, args := range removeParams {
		tag := removeTags[i]
		if args.Destroy {
			destroyTags = append(destroyTags, tag)
			destroyIds = append(destroyIds, args.VolumeId)
		} else {
			releaseTags = append(releaseTags, tag)
			releaseIds = append(releaseIds, args.VolumeId)
		}
	}
	return
}

// detachVolumes destroys volume attachments with the specified parameters.
func detachVolumes(ctx *context, ops map[params.MachineStorageId]*detachVolumeOp) error {
	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
	for _, op := range ops {
		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
		ctx.config.StorageDir, volumeAttachmentParams, ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	var remove []params.MachineStorageId
	for sourceName, volumeAttachmentParams := range paramsBySource {
		logger.Debugf("detaching volumes: %+v", volumeAttachmentParams)
		volumeSource := volumeSources[sourceName]
		if volumeSource == nil {
			// The storage provider does not support dynamic
			// storage; there's nothing for the provisioner
			// to do here.
			continue
		}
		errs, err := volumeSource.DetachVolumes(ctx.config.CloudCallContext, volumeAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "detaching volumes from source %q", sourceName)
		}
		for i, err := range errs {
			p := volumeAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag: p.Volume.String(),
				// TODO(axw) when we support multiple
				// attachments, we'll have to check if
				// there are any other attachments
				// before reporting the status as "detached".
				Status: status.Detached.String(),
			})
			id := params.MachineStorageId{
				MachineTag:    p.Machine.String(),
				AttachmentTag: p.Volume.String(),
			}
			entityStatus := &statuses[len(statuses)-1]
			if err != nil {
				reschedule = append(reschedule, ops[id])
				entityStatus.Status = status.Detaching.String()
				entityStatus.Info = err.Error()
				logger.Debugf(
					"failed to detach %s from %s: %v",
					names.ReadableString(p.Volume),
					names.ReadableString(p.Machine),
					err,
				)
				continue
			}
			remove = append(remove, id)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeAttachments(ctx, remove); err != nil {
		return errors.Annotate(err, "removing attachments from state")
	}
	for _, id := range remove {
		delete(ctx.volumeAttachments, id)
	}
	return nil
}

// volumeParamsBySource separates the volume parameters by volume source.
func volumeParamsBySource(
	baseStorageDir string,
	params []storage.VolumeParams,
	registry storage.ProviderRegistry,
) (map[string][]storage.VolumeParams, map[string]storage.VolumeSource, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.
	volumeSources := make(map[string]storage.VolumeSource)
	for _, params := range params {
		sourceName := string(params.Provider)
		if _, ok := volumeSources[sourceName]; ok {
			continue
		}
		volumeSource, err := volumeSource(
			baseStorageDir, sourceName, params.Provider, registry,
		)
		if errors.Cause(err) == errNonDynamic {
			volumeSource = nil
		} else if err != nil {
			return nil, nil, errors.Annotate(err, "getting volume source")
		}
		volumeSources[sourceName] = volumeSource
	}
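	// Group the parameters by source name, keeping only those whose
	// provider has a dynamic (non-nil) volume source.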
	paramsBySource := make(map[string][]storage.VolumeParams)
	for _, params := range params {
		sourceName := string(params.Provider)
		volumeSource := volumeSources[sourceName]
		if volumeSource == nil {
			// Ignore nil volume sources; this means that the
			// volume should be created by the machine-provisioner.
			continue
		}
		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
	}
	return paramsBySource, volumeSources, nil
}

// validateVolumeParams validates a collection of volume parameters, returning
// the parameters that passed validation along with a per-parameter slice of
// validation errors.
func validateVolumeParams(
	volumeSource storage.VolumeSource, volumeParams []storage.VolumeParams,
) ([]storage.VolumeParams, []error) {
	valid := make([]storage.VolumeParams, 0, len(volumeParams))
	results := make([]error, len(volumeParams))
	for i, params := range volumeParams {
		err := volumeSource.ValidateVolumeParams(params)
		if err == nil {
			valid = append(valid, params)
		}
		results[i] = err
	}
	return valid, results
}

// volumeAttachmentParamsBySource separates the volume attachment parameters by volume source.
func volumeAttachmentParamsBySource(
	baseStorageDir string,
	params []storage.VolumeAttachmentParams,
	registry storage.ProviderRegistry,
) (map[string][]storage.VolumeAttachmentParams, map[string]storage.VolumeSource, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.
	volumeSources := make(map[string]storage.VolumeSource)
	paramsBySource := make(map[string][]storage.VolumeAttachmentParams)
	for _, params := range params {
		sourceName := string(params.Provider)
		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
		if _, ok := volumeSources[sourceName]; ok {
			continue
		}
		volumeSource, err := volumeSource(
			baseStorageDir, sourceName, params.Provider, registry,
		)
		if errors.Cause(err) == errNonDynamic {
			volumeSource = nil
		} else if err != nil {
			return nil, nil, errors.Annotate(err, "getting volume source")
		}
		volumeSources[sourceName] = volumeSource
	}
	return paramsBySource, volumeSources, nil
}

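// setVolumeAttachmentInfo publishes the given volume attachment info to state
// and records each attachment in the provisioner context.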
func setVolumeAttachmentInfo(ctx *context, volumeAttachments []storage.VolumeAttachment) error {
	if len(volumeAttachments) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volume attachments in the
	// provider, by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing volumes that we fail to
	// record in state.
	errorResults, err := ctx.config.Volumes.SetVolumeAttachmentInfo(
		volumeAttachmentsFromStorage(volumeAttachments),
	)
	if err != nil {
		return errors.Annotate(err, "publishing volume attachments to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			return errors.Annotatef(
				result.Error, "publishing attachment of %s to %s to state",
				names.ReadableString(volumeAttachments[i].Volume),
				names.ReadableString(volumeAttachments[i].Machine),
			)
		}
		// Record the volume attachment in the context.
		id := params.MachineStorageId{
			MachineTag:    volumeAttachments[i].Machine.String(),
			AttachmentTag: volumeAttachments[i].Volume.String(),
		}
		ctx.volumeAttachments[id] = volumeAttachments[i]
		removePendingVolumeAttachment(ctx, id)
	}
	return nil
}

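// createVolumeOp is a pending operation to create a volume with the given
// parameters, retried with exponential backoff.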
type createVolumeOp struct {
	exponentialBackoff
	args storage.VolumeParams
}

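// key identifies the operation by the tag of the volume to be created.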
func (op *createVolumeOp) key() interface{} {
	return op.args.Tag
}

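// removeVolumeOp is a pending operation to destroy or release the volume with
// the given tag, retried with exponential backoff.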
type removeVolumeOp struct {
	exponentialBackoff
	tag names.VolumeTag
}

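// key identifies the operation by the tag of the volume to be removed.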
func (op *removeVolumeOp) key() interface{} {
	return op.tag
}

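// attachVolumeOp is a pending operation to attach a volume to a machine,
// retried with exponential backoff.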
type attachVolumeOp struct {
	exponentialBackoff
	args storage.VolumeAttachmentParams
}

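// key identifies the operation by the machine/volume attachment ID.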
func (op *attachVolumeOp) key() interface{} {
	return params.MachineStorageId{
		MachineTag:    op.args.Machine.String(),
		AttachmentTag: op.args.Volume.String(),
	}
}

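// detachVolumeOp is a pending operation to detach a volume from a machine,
// retried with exponential backoff.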
type detachVolumeOp struct {
	exponentialBackoff
	args storage.VolumeAttachmentParams
}

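// key identifies the operation by the machine/volume attachment ID.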
func (op *detachVolumeOp) key() interface{} {
	return params.MachineStorageId{
		MachineTag:    op.args.Machine.String(),
		AttachmentTag: op.args.Volume.String(),
	}
}