github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/client/allocrunner/taskrunner/volume_hook.go

package taskrunner

import (
	"context"
	"fmt"

	log "github.com/hashicorp/go-hclog"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/taskenv"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
)

type volumeHook struct {
	alloc   *structs.Allocation
	runner  *TaskRunner
	logger  log.Logger
	taskEnv *taskenv.TaskEnv
}

func newVolumeHook(runner *TaskRunner, logger log.Logger) *volumeHook {
	h := &volumeHook{
		alloc:  runner.Alloc(),
		runner: runner,
	}
	h.logger = logger.Named(h.Name())
	return h
}

func (*volumeHook) Name() string {
	return "volumes"
}

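// validateHostVolumes checks that every requested host volume has a matching
// entry in the client's host volume configuration, accumulating an error for
// each request that cannot be satisfied.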
func validateHostVolumes(requestedByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) error {
	var result error

	for _, req := range requestedByAlias {
		// This is a defensive check; this function should only ever receive
		// host-type volumes.
		if req.Type != structs.VolumeTypeHost {
			continue
		}

		_, ok := clientVolumesByName[req.Source]
		if !ok {
			result = multierror.Append(result, fmt.Errorf("missing %s", req.Source))
		}
	}

	return result
}

// hostVolumeMountConfigurations takes the user's requested volume mounts,
// volumes, and the client host volume configuration and converts them into a
// format that can be used by drivers.
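//
// For example (aliases and paths illustrative), a task mount of volume "data"
// at "/srv/data" combined with a client host volume "data" configured at
// "/opt/data" produces a MountConfig{HostPath: "/opt/data", TaskPath: "/srv/data"}.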
func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeMount, taskVolumesByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) ([]*drivers.MountConfig, error) {
	var mounts []*drivers.MountConfig
	for _, m := range taskMounts {
		req, ok := taskVolumesByAlias[m.Volume]
		if !ok {
			// This function receives only the task volumes that are of type
			// host; if we can't find a matching group volume, we assume the
			// mount is for another volume type.
			continue
		}

		// This is a defensive check; this function should only ever receive
		// host-type volumes.
		if req.Type != structs.VolumeTypeHost {
			continue
		}

		hostVolume, ok := clientVolumesByName[req.Source]
		if !ok {
			// Should never happen unless the client volumes were mutated
			// during the execution of this hook.
			return nil, fmt.Errorf("No host volume named: %s", req.Source)
		}

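		// The mount is read-only if any layer requests it: the client's host
		// volume config, the group's volume request, or the task's mount.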
		mcfg := &drivers.MountConfig{
			HostPath: hostVolume.Path,
			TaskPath: m.Destination,
			Readonly: hostVolume.ReadOnly || req.ReadOnly || m.ReadOnly,
		}
		mounts = append(mounts, mcfg)
	}

	return mounts, nil
}

// partitionVolumesByType takes a map of volume-alias to volume-request and
// returns them in the form of volume-type:(volume-alias:volume-request)
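//
// For example (aliases illustrative), a "host"-type request aliased "data" and
// a "csi"-type request aliased "cache" become:
//
//	{"host": {"data": <req>}, "csi": {"cache": <req>}}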
func partitionVolumesByType(xs map[string]*structs.VolumeRequest) map[string]map[string]*structs.VolumeRequest {
	result := make(map[string]map[string]*structs.VolumeRequest)
	for name, req := range xs {
		txs, ok := result[req.Type]
		if !ok {
			txs = make(map[string]*structs.VolumeRequest)
			result[req.Type] = txs
		}
		txs[name] = req
	}

	return result
}

func (h *volumeHook) prepareHostVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) {
	hostVolumes := h.runner.clientConfig.Node.HostVolumes

	// Always validate volumes to ensure that we do not allow volumes to be used
	// if a host is restarted and loses the host volume configuration.
	if err := validateHostVolumes(volumes, hostVolumes); err != nil {
		h.logger.Error("Requested Host Volume does not exist", "existing", hostVolumes, "requested", volumes)
		return nil, fmt.Errorf("host volume validation error: %v", err)
	}

	hostVolumeMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes)
	if err != nil {
		h.logger.Error("Failed to generate host volume mounts", "error", err)
		return nil, err
	}

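	// Only check driver mount support when there is at least one host volume
	// mount to configure.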
	if len(hostVolumeMounts) > 0 {
		caps, err := h.runner.DriverCapabilities()
		if err != nil {
			return nil, fmt.Errorf("could not validate task driver capabilities: %v", err)
		}
		if caps.MountConfigs == drivers.MountConfigSupportNone {
			return nil, fmt.Errorf(
				"task driver %q for %q does not support host volumes",
				h.runner.task.Driver, h.runner.task.Name)
		}
	}

	return hostVolumeMounts, nil
}

// partitionMountsByVolume takes a list of volume mounts and returns them in the
// form of volume-alias:[]volume-mount because one volume may be mounted multiple
// times.
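//
// For example (alias illustrative), two mounts of volume "data" at different
// destinations become {"data": [mount1, mount2]}.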
func partitionMountsByVolume(xs []*structs.VolumeMount) map[string][]*structs.VolumeMount {
	result := make(map[string][]*structs.VolumeMount)
	for _, mount := range xs {
		result[mount.Volume] = append(result[mount.Volume], mount)
	}

	return result
}

func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) {
	if len(volumes) == 0 {
		return nil, nil
	}

	var mounts []*drivers.MountConfig

	mountRequests := partitionMountsByVolume(req.Task.VolumeMounts)
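	// CSI mount points are expected to have been staged and published by the
	// alloc-level CSI hook and shared with task hooks via allocHookResources.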
	csiMountPoints := h.runner.allocHookResources.GetCSIMounts()
	for alias, request := range volumes {
		mountsForAlias, ok := mountRequests[alias]
		if !ok {
			// This task doesn't use the volume
			continue
		}

		csiMountPoint, ok := csiMountPoints[alias]
		if !ok {
			return nil, fmt.Errorf("No CSI Mount Point found for volume: %s", alias)
		}

		for _, m := range mountsForAlias {
			mcfg := &drivers.MountConfig{
				HostPath: csiMountPoint.Source,
				TaskPath: m.Destination,
				Readonly: request.ReadOnly || m.ReadOnly,
			}
			mounts = append(mounts, mcfg)
		}
	}

	if len(mounts) > 0 {
		caps, err := h.runner.DriverCapabilities()
		if err != nil {
			return nil, fmt.Errorf("could not validate task driver capabilities: %v", err)
		}
		if caps.MountConfigs == drivers.MountConfigSupportNone {
			return nil, fmt.Errorf(
				"task driver %q for %q does not support CSI",
				h.runner.task.Driver, h.runner.task.Name)
		}
	}

	return mounts, nil
}

func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
	h.taskEnv = req.TaskEnv
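	// Interpolate the task's volume mounts before partitioning so that any
	// environment-derived volume aliases and destinations resolve correctly.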
	interpolateVolumeMounts(req.Task.VolumeMounts, h.taskEnv)

	volumes := partitionVolumesByType(h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes)

	hostVolumeMounts, err := h.prepareHostVolumes(req, volumes[structs.VolumeTypeHost])
	if err != nil {
		return err
	}

	csiVolumeMounts, err := h.prepareCSIVolumes(req, volumes[structs.VolumeTypeCSI])
	if err != nil {
		return err
	}

	// Because this hook is also run on restores, we only add mounts that do
	// not already exist. Although this loop is somewhat expensive, there are
	// only a small number of mounts within most individual tasks. We may want
	// to revisit this with a `hookdata` param so that mounts are added only
	// once.
	mounts := h.runner.hookResources.getMounts()
	for _, m := range hostVolumeMounts {
		mounts = ensureMountpointInserted(mounts, m)
	}
	for _, m := range csiVolumeMounts {
		mounts = ensureMountpointInserted(mounts, m)
	}
	h.runner.hookResources.setMounts(mounts)

	return nil
}

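// interpolateVolumeMounts expands task environment variables in each mount's
// volume alias, destination, and propagation mode, mutating the mounts in
// place.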
func interpolateVolumeMounts(mounts []*structs.VolumeMount, taskEnv *taskenv.TaskEnv) {
	for _, mount := range mounts {
		mount.Volume = taskEnv.ReplaceEnv(mount.Volume)
		mount.Destination = taskEnv.ReplaceEnv(mount.Destination)
		mount.PropagationMode = taskEnv.ReplaceEnv(mount.PropagationMode)
	}
}