github.com/iqoqo/nomad@v0.11.3-0.20200911112621-d7021c74d101/client/allocrunner/taskrunner/volume_hook.go

package taskrunner

import (
	"context"
	"fmt"

	log "github.com/hashicorp/go-hclog"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/taskenv"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
)

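// volumeHook mounts host and CSI volumes requested by the task group into the
// task. It runs as a task prestart hook and records the resulting mount
// configurations in the task runner's hook resources so the driver can apply
// them when the task starts.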
type volumeHook struct {
	alloc   *structs.Allocation
	runner  *TaskRunner
	logger  log.Logger
	taskEnv *taskenv.TaskEnv
}

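// newVolumeHook builds a volumeHook for the given task runner, capturing the
// allocation at construction time and namespacing the logger with the hook's
// name.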
func newVolumeHook(runner *TaskRunner, logger log.Logger) *volumeHook {
	h := &volumeHook{
		alloc:  runner.Alloc(),
		runner: runner,
	}
	h.logger = logger.Named(h.Name())
	return h
}

func (*volumeHook) Name() string {
	return "volumes"
}

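// validateHostVolumes checks that every requested host volume is present in
// the client's host volume configuration, accumulating one error per missing
// volume.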
func validateHostVolumes(requestedByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) error {
	var result error

	for _, req := range requestedByAlias {
		// This is a defensive check; this function should only ever receive
		// host-type volumes.
		if req.Type != structs.VolumeTypeHost {
			continue
		}

		_, ok := clientVolumesByName[req.Source]
		if !ok {
			result = multierror.Append(result, fmt.Errorf("missing %s", req.Source))
		}
	}

	return result
}

// hostVolumeMountConfigurations takes the user's requested volume mounts,
// volumes, and the client host volume configuration and converts them into a
// format that can be used by drivers.
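//
// A minimal sketch of the conversion, with illustrative values only (the
// aliases and paths below are hypothetical, not taken from any real job):
//
//	hostVolumes := map[string]*structs.ClientHostVolumeConfig{
//		"shared_data": {Path: "/opt/shared_data", ReadOnly: false},
//	}
//	volumes := map[string]*structs.VolumeRequest{
//		"data": {Type: structs.VolumeTypeHost, Source: "shared_data", ReadOnly: true},
//	}
//	mounts := []*structs.VolumeMount{
//		{Volume: "data", Destination: "/var/lib/data", ReadOnly: false},
//	}
//	// Yields one MountConfig: HostPath "/opt/shared_data", TaskPath
//	// "/var/lib/data", Readonly true, because read-only is forced if any of
//	// the client config, the volume request, or the mount asks for it.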
func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeMount, taskVolumesByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) ([]*drivers.MountConfig, error) {
	var mounts []*drivers.MountConfig
	for _, m := range taskMounts {
		req, ok := taskVolumesByAlias[m.Volume]
		if !ok {
			// This function receives only the task volumes that are of type host;
			// if we can't find a matching group volume, assume the mount refers
			// to a volume of another type and skip it.
			continue
		}

		// This is a defensive check; this function should only ever receive
		// host-type volumes.
		if req.Type != structs.VolumeTypeHost {
			continue
		}

		hostVolume, ok := clientVolumesByName[req.Source]
		if !ok {
			// This should never happen unless the client volumes were mutated
			// during the execution of this hook.
			return nil, fmt.Errorf("No host volume named: %s", req.Source)
		}

		mcfg := &drivers.MountConfig{
			HostPath: hostVolume.Path,
			TaskPath: m.Destination,
			Readonly: hostVolume.ReadOnly || req.ReadOnly || m.ReadOnly,
		}
		mounts = append(mounts, mcfg)
	}

	return mounts, nil
}

// partitionVolumesByType takes a map of volume-alias to volume-request and
// returns them in the form of volume-type:(volume-alias:volume-request)
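//
// A small illustrative sketch (the aliases and sources are hypothetical):
//
//	partitionVolumesByType(map[string]*structs.VolumeRequest{
//		"data":  {Type: structs.VolumeTypeHost, Source: "shared_data"},
//		"certs": {Type: structs.VolumeTypeCSI, Source: "org-certs"},
//	})
//	// => map["host"]["data"] and map["csi"]["certs"]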
func partitionVolumesByType(xs map[string]*structs.VolumeRequest) map[string]map[string]*structs.VolumeRequest {
	result := make(map[string]map[string]*structs.VolumeRequest)
	for name, req := range xs {
		txs, ok := result[req.Type]
		if !ok {
			txs = make(map[string]*structs.VolumeRequest)
			result[req.Type] = txs
		}
		txs[name] = req
	}

	return result
}

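// prepareHostVolumes validates the requested host volumes against the client's
// host volume configuration and converts the task's volume mounts into driver
// mount configurations.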
func (h *volumeHook) prepareHostVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) {
	hostVolumes := h.runner.clientConfig.Node.HostVolumes

	// Always validate volumes to ensure that we do not allow volumes to be used
	// if a host is restarted and loses the host volume configuration.
	if err := validateHostVolumes(volumes, hostVolumes); err != nil {
		h.logger.Error("Requested Host Volume does not exist", "existing", hostVolumes, "requested", volumes)
		return nil, fmt.Errorf("host volume validation error: %v", err)
	}

	hostVolumeMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes)
	if err != nil {
		h.logger.Error("Failed to generate host volume mounts", "error", err)
		return nil, err
	}

	return hostVolumeMounts, nil
}

// partitionMountsByVolume takes a list of volume mounts and returns them in the
// form of volume-alias:[]volume-mount because one volume may be mounted multiple
// times.
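//
// For example (hypothetical mounts), two mounts of the same "data" volume
// group under one key:
//
//	partitionMountsByVolume([]*structs.VolumeMount{
//		{Volume: "data", Destination: "/var/lib/data"},
//		{Volume: "data", Destination: "/srv/data", ReadOnly: true},
//	})
//	// => map["data"] holding both mounts, in order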
func partitionMountsByVolume(xs []*structs.VolumeMount) map[string][]*structs.VolumeMount {
	result := make(map[string][]*structs.VolumeMount)
	for _, mount := range xs {
		result[mount.Volume] = append(result[mount.Volume], mount)
	}

	return result
}

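// prepareCSIVolumes converts the task's volume mounts for CSI volumes into
// driver mount configurations, using the per-volume CSI mount points recorded
// in the allocation hook resources.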
func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) {
	if len(volumes) == 0 {
		return nil, nil
	}

	var mounts []*drivers.MountConfig

	mountRequests := partitionMountsByVolume(req.Task.VolumeMounts)
	csiMountPoints := h.runner.allocHookResources.GetCSIMounts()
	for alias, request := range volumes {
		mountsForAlias, ok := mountRequests[alias]
		if !ok {
			// This task doesn't use the volume
			continue
		}

		csiMountPoint, ok := csiMountPoints[alias]
		if !ok {
			return nil, fmt.Errorf("No CSI Mount Point found for volume: %s", alias)
		}

		for _, m := range mountsForAlias {
			mcfg := &drivers.MountConfig{
				HostPath: csiMountPoint.Source,
				TaskPath: m.Destination,
				Readonly: request.ReadOnly || m.ReadOnly,
			}
			mounts = append(mounts, mcfg)
		}
	}

	return mounts, nil
}

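// Prestart interpolates the task's volume mounts, prepares mount
// configurations for the group's host and CSI volumes, and merges them into
// the task runner's hook resources ahead of task start.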
func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
	h.taskEnv = req.TaskEnv
	interpolateVolumeMounts(req.Task.VolumeMounts, h.taskEnv)

	volumes := partitionVolumesByType(h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes)

	hostVolumeMounts, err := h.prepareHostVolumes(req, volumes[structs.VolumeTypeHost])
	if err != nil {
		return err
	}

	csiVolumeMounts, err := h.prepareCSIVolumes(req, volumes[structs.VolumeTypeCSI])
	if err != nil {
		return err
	}

	// Because this hook is also run on restores, we only add mounts that do not
	// already exist. Although this loop is somewhat expensive, only a small
	// number of mounts exist within most individual tasks. We may want to
	// revisit this using a `hookdata` param to mount only once.
	mounts := h.runner.hookResources.getMounts()
	for _, m := range hostVolumeMounts {
		mounts = ensureMountpointInserted(mounts, m)
	}
	for _, m := range csiVolumeMounts {
		mounts = ensureMountpointInserted(mounts, m)
	}
	h.runner.hookResources.setMounts(mounts)

	return nil
}

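// interpolateVolumeMounts expands task environment variables in each mount's
// volume alias, destination path, and propagation mode, mutating the mounts in
// place.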
func interpolateVolumeMounts(mounts []*structs.VolumeMount, taskEnv *taskenv.TaskEnv) {
	for _, mount := range mounts {
		mount.Volume = taskEnv.ReplaceEnv(mount.Volume)
		mount.Destination = taskEnv.ReplaceEnv(mount.Destination)
		mount.PropagationMode = taskEnv.ReplaceEnv(mount.PropagationMode)
	}
}