github.com/manicqin/nomad@v0.9.5/client/allocrunner/taskrunner/volume_hook.go

package taskrunner

import (
	"context"
	"fmt"

	log "github.com/hashicorp/go-hclog"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
)

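// volumeHook is a task runner hook that resolves a task's requested host
// volume mounts into driver mount configurations before the task starts.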
type volumeHook struct {
	alloc  *structs.Allocation
	runner *TaskRunner
	logger log.Logger
}

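// newVolumeHook returns a volumeHook configured with the runner's current
// allocation and a logger named after the hook.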
func newVolumeHook(runner *TaskRunner, logger log.Logger) *volumeHook {
	h := &volumeHook{
		alloc:  runner.Alloc(),
		runner: runner,
	}
	h.logger = logger.Named(h.Name())
	return h
}

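// Name returns the name of the hook.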
func (*volumeHook) Name() string {
	return "volumes"
}

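// validateHostVolumes checks that every requested volume of type host exists
// in the client's host volume configuration, accumulating one error per
// missing volume.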
func validateHostVolumes(requestedByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) error {
	var result error

	for _, req := range requestedByAlias {
		if req.Type != structs.VolumeTypeHost {
			continue
		}

		_, ok := clientVolumesByName[req.Source]
		if !ok {
			result = multierror.Append(result, fmt.Errorf("missing %s", req.Source))
		}
	}

	return result
}

// hostVolumeMountConfigurations takes the user's requested volume mounts,
// volumes, and the client host volume configuration and converts them into a
// format that can be used by drivers.
func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeMount, taskVolumesByAlias map[string]*structs.VolumeRequest, clientVolumesByName map[string]*structs.ClientHostVolumeConfig) ([]*drivers.MountConfig, error) {
	var mounts []*drivers.MountConfig
	for _, m := range taskMounts {
		req, ok := taskVolumesByAlias[m.Volume]
		if !ok {
			// Should never happen unless we mis-validated on job submission
			return nil, fmt.Errorf("No group volume declaration found named: %s", m.Volume)
		}

		hostVolume, ok := clientVolumesByName[req.Source]
		if !ok {
			// Should never happen unless the client volumes were mutated during
			// the execution of this hook.
			return nil, fmt.Errorf("No host volume named: %s", req.Source)
		}

		mcfg := &drivers.MountConfig{
			HostPath: hostVolume.Path,
			TaskPath: m.Destination,
			Readonly: hostVolume.ReadOnly || req.ReadOnly || m.ReadOnly,
		}
		mounts = append(mounts, mcfg)
	}

	return mounts, nil
}

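// Prestart validates the task group's requested host volumes against the
// client's host volume configuration, converts them into driver mount
// configurations, and registers any mounts not already present with the task
// runner's hook resources.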
func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
	volumes := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes
	mounts := h.runner.hookResources.getMounts()

	hostVolumes := h.runner.clientConfig.Node.HostVolumes

	// Always validate volumes to ensure that we do not allow volumes to be used
	// if a host is restarted and loses the host volume configuration.
	if err := validateHostVolumes(volumes, hostVolumes); err != nil {
		h.logger.Error("Requested Host Volume does not exist", "existing", hostVolumes, "requested", volumes)
		return fmt.Errorf("host volume validation error: %v", err)
	}

	requestedMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes)
	if err != nil {
		h.logger.Error("Failed to generate volume mounts", "error", err)
		return err
	}

	// Because this hook is also run on restores, we only add mounts that do not
	// already exist. Although this loop is somewhat expensive, there are only
	// a small number of mounts within most individual tasks. We may want to
	// revisit this using a `hookdata` param to make it "mount only once".
REQUESTED:
	for _, m := range requestedMounts {
		for _, em := range mounts {
			if em.IsEqual(m) {
				continue REQUESTED
			}
		}

		mounts = append(mounts, m)
	}

	h.runner.hookResources.setMounts(mounts)
	return nil
}