github.com/rohankumardubey/nomad@v0.11.8/scheduler/feasible.go

     1  package scheduler
     2  
     3  import (
     4  	"fmt"
     5  	"reflect"
     6  	"regexp"
     7  	"strconv"
     8  	"strings"
     9  
    10  	memdb "github.com/hashicorp/go-memdb"
    11  	version "github.com/hashicorp/go-version"
    12  	"github.com/hashicorp/nomad/helper/constraints/semver"
    13  	"github.com/hashicorp/nomad/nomad/structs"
    14  	psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
    15  )
    16  
    17  const (
    18  	FilterConstraintHostVolumes                 = "missing compatible host volumes"
    19  	FilterConstraintCSIPluginTemplate           = "CSI plugin %s is missing from client %s"
    20  	FilterConstraintCSIPluginUnhealthyTemplate  = "CSI plugin %s is unhealthy on client %s"
    21  	FilterConstraintCSIPluginMaxVolumesTemplate = "CSI plugin %s has the maximum number of volumes on client %s"
    22  	FilterConstraintCSIVolumesLookupFailed      = "CSI volume lookup failed"
    23  	FilterConstraintCSIVolumeNotFoundTemplate   = "missing CSI Volume %s"
    24  	FilterConstraintCSIVolumeNoReadTemplate     = "CSI volume %s is unschedulable or has exhausted its available reader claims"
    25  	FilterConstraintCSIVolumeNoWriteTemplate    = "CSI volume %s is unschedulable or is read-only"
    26  	FilterConstraintCSIVolumeInUseTemplate      = "CSI volume %s has exhausted its available writer claims"
    27  	FilterConstraintDrivers                     = "missing drivers"
    28  	FilterConstraintDevices                     = "missing devices"
    29  )
    30  
    31  // FeasibleIterator is used to iteratively yield nodes that
    32  // match feasibility constraints. The iterators may manage
    33  // some state for performance optimizations.
    34  type FeasibleIterator interface {
    35  	// Next yields a feasible node or nil if exhausted
    36  	Next() *structs.Node
    37  
    38  	// Reset is invoked when an allocation has been placed
    39  	// to reset any stale state.
    40  	Reset()
    41  }
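
// Illustrative sketch (editorial addition, not part of the original source):
// a FeasibleIterator is typically drained by calling Next until it returns
// nil, and Reset is invoked after a placement so stateful iterators can
// discard stale state:
//
//	for option := iter.Next(); option != nil; option = iter.Next() {
//		// evaluate or place an allocation on option
//	}
//	iter.Reset()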
    42  
    43  // ContextualIterator is an iterator that can have the job and task group set
    44  // on it.
    45  type ContextualIterator interface {
    46  	SetJob(*structs.Job)
    47  	SetTaskGroup(*structs.TaskGroup)
    48  }
    49  
    50  // FeasibilityChecker is used to check if a single node meets feasibility
    51  // constraints.
    52  type FeasibilityChecker interface {
    53  	Feasible(*structs.Node) bool
    54  }
    55  
    56  // StaticIterator is a FeasibleIterator which returns nodes
    57  // in a static order. This is used at the base of the iterator
    58  // chain only for testing due to deterministic behavior.
    59  type StaticIterator struct {
    60  	ctx    Context
    61  	nodes  []*structs.Node
    62  	offset int
    63  	seen   int
    64  }
    65  
    66  // NewStaticIterator constructs a static iterator from a list of nodes
    67  func NewStaticIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
    68  	iter := &StaticIterator{
    69  		ctx:   ctx,
    70  		nodes: nodes,
    71  	}
    72  	return iter
    73  }
    74  
    75  func (iter *StaticIterator) Next() *structs.Node {
    76  	// Check if exhausted
    77  	n := len(iter.nodes)
    78  	if iter.offset == n || iter.seen == n {
    79  		if iter.seen != n { // seen has been Reset() to 0
    80  			iter.offset = 0
    81  		} else {
    82  			return nil
    83  		}
    84  	}
    85  
    86  	// Return the next offset, use this one
    87  	offset := iter.offset
    88  	iter.offset += 1
    89  	iter.seen += 1
    90  	iter.ctx.Metrics().EvaluateNode()
    91  	return iter.nodes[offset]
    92  }
    93  
    94  func (iter *StaticIterator) Reset() {
    95  	iter.seen = 0
    96  }
    97  
    98  func (iter *StaticIterator) SetNodes(nodes []*structs.Node) {
    99  	iter.nodes = nodes
   100  	iter.offset = 0
   101  	iter.seen = 0
   102  }
   103  
   104  // NewRandomIterator constructs a static iterator from a list of nodes
   105  // after applying the Fisher-Yates algorithm for a random shuffle. The
   106  // shuffle is applied in-place.
   107  func NewRandomIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
   108  	// shuffle with the Fisher-Yates algorithm
   109  	shuffleNodes(nodes)
   110  
   111  	// Create a static iterator
   112  	return NewStaticIterator(ctx, nodes)
   113  }
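
// Editorial note: shuffleNodes mutates the slice it is given, so callers that
// need to preserve the original ordering should pass a copy. A minimal sketch,
// assuming ctx implements Context:
//
//	shuffled := make([]*structs.Node, len(nodes))
//	copy(shuffled, nodes)
//	iter := NewRandomIterator(ctx, shuffled)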
   114  
   115  // HostVolumeChecker is a FeasibilityChecker which returns whether a node has
   116  // the host volumes necessary to schedule a task group.
   117  type HostVolumeChecker struct {
   118  	ctx Context
   119  
   120  	// volumes is a map[HostVolumeName][]RequestedVolume. The requested volumes are
   121  	// a slice because a single task group may request the same volume multiple times.
   122  	volumes map[string][]*structs.VolumeRequest
   123  }
   124  
   125  // NewHostVolumeChecker creates a HostVolumeChecker from a set of volumes
   126  func NewHostVolumeChecker(ctx Context) *HostVolumeChecker {
   127  	return &HostVolumeChecker{
   128  		ctx: ctx,
   129  	}
   130  }
   131  
   132  // SetVolumes takes the volumes required by a task group and updates the checker.
   133  func (h *HostVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) {
   134  	lookupMap := make(map[string][]*structs.VolumeRequest)
   135  	// Convert the map from map[DesiredName]Request to map[Source][]Request to improve
   136  	// lookup performance. Also filter non-host volumes.
   137  	for _, req := range volumes {
   138  		if req.Type != structs.VolumeTypeHost {
   139  			continue
   140  		}
   141  
   142  		lookupMap[req.Source] = append(lookupMap[req.Source], req)
   143  	}
   144  	h.volumes = lookupMap
   145  }
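
// Illustrative example (editorial addition): two requests that share the same
// host volume source are grouped under one key, while non-host (e.g. CSI)
// requests are dropped. The volume names and sources here are hypothetical:
//
//	h.SetVolumes(map[string]*structs.VolumeRequest{
//		"logs":  {Type: structs.VolumeTypeHost, Source: "shared_data"},
//		"certs": {Type: structs.VolumeTypeHost, Source: "shared_data", ReadOnly: true},
//		"db":    {Type: structs.VolumeTypeCSI, Source: "ebs-vol0"},
//	})
//	// h.volumes now maps "shared_data" to both host volume requests.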
   146  
   147  func (h *HostVolumeChecker) Feasible(candidate *structs.Node) bool {
   148  	if h.hasVolumes(candidate) {
   149  		return true
   150  	}
   151  
   152  	h.ctx.Metrics().FilterNode(candidate, FilterConstraintHostVolumes)
   153  	return false
   154  }
   155  
   156  func (h *HostVolumeChecker) hasVolumes(n *structs.Node) bool {
   157  	rLen := len(h.volumes)
   158  	hLen := len(n.HostVolumes)
   159  
   160  	// Fast path: Requested no volumes. No need to check further.
   161  	if rLen == 0 {
   162  		return true
   163  	}
   164  
   165  	// Fast path: Requesting more volumes than the node has, can't meet the criteria.
   166  	if rLen > hLen {
   167  		return false
   168  	}
   169  
   170  	for source, requests := range h.volumes {
   171  		nodeVolume, ok := n.HostVolumes[source]
   172  		if !ok {
   173  			return false
   174  		}
   175  
   176  		// If the volume supports being mounted as ReadWrite, we do not need to
   177  		// do further validation for readonly placement.
   178  		if !nodeVolume.ReadOnly {
   179  			continue
   180  		}
   181  
   182  		// The Volume can only be mounted ReadOnly, validate that no requests for
   183  		// it are ReadWrite.
   184  		for _, req := range requests {
   185  			if !req.ReadOnly {
   186  				return false
   187  			}
   188  		}
   189  	}
   190  
   191  	return true
   192  }
   193  
   194  type CSIVolumeChecker struct {
   195  	ctx       Context
   196  	namespace string
   197  	jobID     string
   198  	volumes   map[string]*structs.VolumeRequest
   199  }
   200  
   201  func NewCSIVolumeChecker(ctx Context) *CSIVolumeChecker {
   202  	return &CSIVolumeChecker{
   203  		ctx: ctx,
   204  	}
   205  }
   206  
   207  func (c *CSIVolumeChecker) SetJobID(jobID string) {
   208  	c.jobID = jobID
   209  }
   210  
   211  func (c *CSIVolumeChecker) SetNamespace(namespace string) {
   212  	c.namespace = namespace
   213  }
   214  
   215  func (c *CSIVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) {
   216  	xs := make(map[string]*structs.VolumeRequest)
   217  	// Filter to only CSI Volumes
   218  	for alias, req := range volumes {
   219  		if req.Type != structs.VolumeTypeCSI {
   220  			continue
   221  		}
   222  
   223  		xs[alias] = req
   224  	}
   225  	c.volumes = xs
   226  }
   227  
   228  func (c *CSIVolumeChecker) Feasible(n *structs.Node) bool {
   229  	hasPlugins, failReason := c.hasPlugins(n)
   230  
   231  	if hasPlugins {
   232  		return true
   233  	}
   234  
   235  	c.ctx.Metrics().FilterNode(n, failReason)
   236  	return false
   237  }
   238  
   239  func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) (bool, string) {
   240  	// We can mount the volume if:
   241  	// - a healthy controller plugin is running the driver, if one is required
   242  	// - the volume has free claims, or this job already owns the claims
   243  	// - this node is running the node plugin, which implies matching topology
   244  
   245  	// Fast path: Requested no volumes. No need to check further.
   246  	if len(c.volumes) == 0 {
   247  		return true, ""
   248  	}
   249  
   250  	ws := memdb.NewWatchSet()
   251  
   252  	// Find the count per plugin for this node, so that we can enforce MaxVolumes
   253  	pluginCount := map[string]int64{}
   254  	iter, err := c.ctx.State().CSIVolumesByNodeID(ws, n.ID)
   255  	if err != nil {
   256  		return false, FilterConstraintCSIVolumesLookupFailed
   257  	}
   258  	for {
   259  		raw := iter.Next()
   260  		if raw == nil {
   261  			break
   262  		}
   263  		vol, ok := raw.(*structs.CSIVolume)
   264  		if !ok {
   265  			continue
   266  		}
   267  		pluginCount[vol.PluginID] += 1
   268  	}
   269  
   270  	// For volume requests, find volumes and determine feasibility
   271  	for _, req := range c.volumes {
   272  		vol, err := c.ctx.State().CSIVolumeByID(ws, c.namespace, req.Source)
   273  		if err != nil {
   274  			return false, FilterConstraintCSIVolumesLookupFailed
   275  		}
   276  		if vol == nil {
   277  			return false, fmt.Sprintf(FilterConstraintCSIVolumeNotFoundTemplate, req.Source)
   278  		}
   279  
   280  		// Check that this node has a healthy running plugin with the right PluginID
   281  		plugin, ok := n.CSINodePlugins[vol.PluginID]
   282  		if !ok {
   283  			return false, fmt.Sprintf(FilterConstraintCSIPluginTemplate, vol.PluginID, n.ID)
   284  		}
   285  		if !plugin.Healthy {
   286  			return false, fmt.Sprintf(FilterConstraintCSIPluginUnhealthyTemplate, vol.PluginID, n.ID)
   287  		}
   288  		if pluginCount[vol.PluginID] >= plugin.NodeInfo.MaxVolumes {
   289  			return false, fmt.Sprintf(FilterConstraintCSIPluginMaxVolumesTemplate, vol.PluginID, n.ID)
   290  		}
   291  
   292  		if req.ReadOnly {
   293  			if !vol.ReadSchedulable() {
   294  				return false, fmt.Sprintf(FilterConstraintCSIVolumeNoReadTemplate, vol.ID)
   295  			}
   296  		} else {
   297  			if !vol.WriteSchedulable() {
   298  				return false, fmt.Sprintf(FilterConstraintCSIVolumeNoWriteTemplate, vol.ID)
   299  			}
   300  			if vol.WriteFreeClaims() {
   301  				continue
   302  			}
   303  
   304  			// Check the blocking allocations to see if they belong to this job
   305  			for id := range vol.WriteAllocs {
   306  				a, err := c.ctx.State().AllocByID(ws, id)
   307  				if err != nil || a == nil || a.Namespace != c.namespace || a.JobID != c.jobID {
   308  					return false, fmt.Sprintf(FilterConstraintCSIVolumeInUseTemplate, vol.ID)
   309  				}
   310  			}
   311  		}
   312  	}
   313  
   314  	return true, ""
   315  }
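
// Illustrative wiring (editorial addition): the scheduler is expected to set
// the namespace, job ID, and requested volumes before checking a node, roughly:
//
//	c := NewCSIVolumeChecker(ctx)
//	c.SetNamespace(job.Namespace)
//	c.SetJobID(job.ID)
//	c.SetVolumes(tg.Volumes)
//	ok := c.Feasible(node) // false also records the filter reason in metrics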
   316  
   317  // DriverChecker is a FeasibilityChecker which returns whether a node has the
   318  // drivers necessary to schedule a task group.
   319  type DriverChecker struct {
   320  	ctx     Context
   321  	drivers map[string]struct{}
   322  }
   323  
   324  // NewDriverChecker creates a DriverChecker from a set of drivers
   325  func NewDriverChecker(ctx Context, drivers map[string]struct{}) *DriverChecker {
   326  	return &DriverChecker{
   327  		ctx:     ctx,
   328  		drivers: drivers,
   329  	}
   330  }
   331  
   332  func (c *DriverChecker) SetDrivers(d map[string]struct{}) {
   333  	c.drivers = d
   334  }
   335  
   336  func (c *DriverChecker) Feasible(option *structs.Node) bool {
   337  	// Use this node if possible
   338  	if c.hasDrivers(option) {
   339  		return true
   340  	}
   341  	c.ctx.Metrics().FilterNode(option, FilterConstraintDrivers)
   342  	return false
   343  }
   344  
   345  // hasDrivers is used to check if the node has all the appropriate
   346  // drivers for this task group. Drivers are registered as node attributes
   347  // like "driver.docker=1" with their corresponding version.
   348  func (c *DriverChecker) hasDrivers(option *structs.Node) bool {
   349  	for driver := range c.drivers {
   350  		driverStr := fmt.Sprintf("driver.%s", driver)
   351  
   352  		// COMPAT: Remove in 0.10: As of Nomad 0.8, nodes have a DriverInfo that
   353  		// corresponds with every driver. As a Nomad server might be on a later
   354  		// version than a Nomad client, we need to check for compatibility here
   355  		// to verify the client supports this.
   356  		if driverInfo, ok := option.Drivers[driver]; ok {
   357  			if driverInfo == nil {
   358  				c.ctx.Logger().Named("driver_checker").Warn("node has no driver info set", "node_id", option.ID, "driver", driver)
   359  				return false
   360  			}
   361  
   362  			if driverInfo.Detected && driverInfo.Healthy {
   363  				continue
   364  			} else {
   365  				return false
   366  			}
   367  		}
   368  
   369  		value, ok := option.Attributes[driverStr]
   370  		if !ok {
   371  			return false
   372  		}
   373  
   374  		enabled, err := strconv.ParseBool(value)
   375  		if err != nil {
   376  			c.ctx.Logger().Named("driver_checker").Warn("node has invalid driver setting", "node_id", option.ID, "driver", driver, "val", value)
   377  			return false
   378  		}
   379  
   380  		if !enabled {
   381  			return false
   382  		}
   383  	}
   384  
   385  	return true
   386  }
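
// Illustrative example (editorial addition): on a client that fingerprints
// Docker, either of the following forms satisfies the checker, with the
// DriverInfo form taking precedence when present:
//
//	node.Drivers["docker"] = &structs.DriverInfo{Detected: true, Healthy: true}
//	node.Attributes["driver.docker"] = "1" // legacy attribute form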
   387  
   388  // DistinctHostsIterator is a FeasibleIterator which returns nodes that pass the
   389  // distinct_hosts constraint. The constraint ensures that multiple allocations
   390  // do not exist on the same node.
   391  type DistinctHostsIterator struct {
   392  	ctx    Context
   393  	source FeasibleIterator
   394  	tg     *structs.TaskGroup
   395  	job    *structs.Job
   396  
   397  	// Store whether the Job or TaskGroup has a distinct_hosts constraint so
   398  	// they don't have to be calculated every time Next() is called.
   399  	tgDistinctHosts  bool
   400  	jobDistinctHosts bool
   401  }
   402  
   403  // NewDistinctHostsIterator creates a DistinctHostsIterator from a source.
   404  func NewDistinctHostsIterator(ctx Context, source FeasibleIterator) *DistinctHostsIterator {
   405  	return &DistinctHostsIterator{
   406  		ctx:    ctx,
   407  		source: source,
   408  	}
   409  }
   410  
   411  func (iter *DistinctHostsIterator) SetTaskGroup(tg *structs.TaskGroup) {
   412  	iter.tg = tg
   413  	iter.tgDistinctHosts = iter.hasDistinctHostsConstraint(tg.Constraints)
   414  }
   415  
   416  func (iter *DistinctHostsIterator) SetJob(job *structs.Job) {
   417  	iter.job = job
   418  	iter.jobDistinctHosts = iter.hasDistinctHostsConstraint(job.Constraints)
   419  }
   420  
   421  func (iter *DistinctHostsIterator) hasDistinctHostsConstraint(constraints []*structs.Constraint) bool {
   422  	for _, con := range constraints {
   423  		if con.Operand == structs.ConstraintDistinctHosts {
   424  			return true
   425  		}
   426  	}
   427  
   428  	return false
   429  }
   430  
   431  func (iter *DistinctHostsIterator) Next() *structs.Node {
   432  	for {
   433  		// Get the next option from the source
   434  		option := iter.source.Next()
   435  
   436  		// Hot-path if the option is nil or there are no distinct_hosts
   437  		// constraints.
   438  		hosts := iter.jobDistinctHosts || iter.tgDistinctHosts
   439  		if option == nil || !hosts {
   440  			return option
   441  		}
   442  
   443  		// Check if the host constraints are satisfied
   444  		if !iter.satisfiesDistinctHosts(option) {
   445  			iter.ctx.Metrics().FilterNode(option, structs.ConstraintDistinctHosts)
   446  			continue
   447  		}
   448  
   449  		return option
   450  	}
   451  }
   452  
   453  // satisfiesDistinctHosts checks if the node satisfies a distinct_hosts
   454  // constraint either specified at the job level or the TaskGroup level.
   455  func (iter *DistinctHostsIterator) satisfiesDistinctHosts(option *structs.Node) bool {
   456  	// Check if there is no constraint set.
   457  	if !(iter.jobDistinctHosts || iter.tgDistinctHosts) {
   458  		return true
   459  	}
   460  
   461  	// Get the proposed allocations
   462  	proposed, err := iter.ctx.ProposedAllocs(option.ID)
   463  	if err != nil {
   464  		iter.ctx.Logger().Named("distinct_hosts").Error("failed to get proposed allocations", "error", err)
   465  		return false
   466  	}
   467  
   468  	// Skip the node if the task group has already been allocated on it.
   469  	for _, alloc := range proposed {
   470  		// If the job has a distinct_hosts constraint we only need an alloc
   471  		// collision on the JobID but if the constraint is on the TaskGroup then
   472  		// we need both a job and TaskGroup collision.
   473  		jobCollision := alloc.JobID == iter.job.ID
   474  		taskCollision := alloc.TaskGroup == iter.tg.Name
   475  		if iter.jobDistinctHosts && jobCollision || jobCollision && taskCollision {
   476  			return false
   477  		}
   478  	}
   479  
   480  	return true
   481  }
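
// Illustrative example (editorial addition): a job-level distinct_hosts
// constraint is expressed as a Constraint carrying only the operand, which
// this iterator then enforces against the proposed allocations:
//
//	job.Constraints = append(job.Constraints, &structs.Constraint{
//		Operand: structs.ConstraintDistinctHosts,
//	})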
   482  
   483  func (iter *DistinctHostsIterator) Reset() {
   484  	iter.source.Reset()
   485  }
   486  
   487  // DistinctPropertyIterator is a FeasibleIterator which returns nodes that pass the
   488  // distinct_property constraint. The constraint ensures that multiple allocations
   489  // do not use the same value of the given property.
   490  type DistinctPropertyIterator struct {
   491  	ctx    Context
   492  	source FeasibleIterator
   493  	tg     *structs.TaskGroup
   494  	job    *structs.Job
   495  
   496  	hasDistinctPropertyConstraints bool
   497  	jobPropertySets                []*propertySet
   498  	groupPropertySets              map[string][]*propertySet
   499  }
   500  
   501  // NewDistinctPropertyIterator creates a DistinctPropertyIterator from a source.
   502  func NewDistinctPropertyIterator(ctx Context, source FeasibleIterator) *DistinctPropertyIterator {
   503  	return &DistinctPropertyIterator{
   504  		ctx:               ctx,
   505  		source:            source,
   506  		groupPropertySets: make(map[string][]*propertySet),
   507  	}
   508  }
   509  
   510  func (iter *DistinctPropertyIterator) SetTaskGroup(tg *structs.TaskGroup) {
   511  	iter.tg = tg
   512  
   513  	// Build the property set at the taskgroup level
   514  	if _, ok := iter.groupPropertySets[tg.Name]; !ok {
   515  		for _, c := range tg.Constraints {
   516  			if c.Operand != structs.ConstraintDistinctProperty {
   517  				continue
   518  			}
   519  
   520  			pset := NewPropertySet(iter.ctx, iter.job)
   521  			pset.SetTGConstraint(c, tg.Name)
   522  			iter.groupPropertySets[tg.Name] = append(iter.groupPropertySets[tg.Name], pset)
   523  		}
   524  	}
   525  
   526  	// Check if there is a distinct property
   527  	iter.hasDistinctPropertyConstraints = len(iter.jobPropertySets) != 0 || len(iter.groupPropertySets[tg.Name]) != 0
   528  }
   529  
   530  func (iter *DistinctPropertyIterator) SetJob(job *structs.Job) {
   531  	iter.job = job
   532  
   533  	// Build the property set at the job level
   534  	for _, c := range job.Constraints {
   535  		if c.Operand != structs.ConstraintDistinctProperty {
   536  			continue
   537  		}
   538  
   539  		pset := NewPropertySet(iter.ctx, job)
   540  		pset.SetJobConstraint(c)
   541  		iter.jobPropertySets = append(iter.jobPropertySets, pset)
   542  	}
   543  }
   544  
   545  func (iter *DistinctPropertyIterator) Next() *structs.Node {
   546  	for {
   547  		// Get the next option from the source
   548  		option := iter.source.Next()
   549  
   550  		// Hot path if there is nothing to check
   551  		if option == nil || !iter.hasDistinctPropertyConstraints {
   552  			return option
   553  		}
   554  
   555  		// Check if the constraints are met
   556  		if !iter.satisfiesProperties(option, iter.jobPropertySets) ||
   557  			!iter.satisfiesProperties(option, iter.groupPropertySets[iter.tg.Name]) {
   558  			continue
   559  		}
   560  
   561  		return option
   562  	}
   563  }
   564  
   565  // satisfiesProperties returns whether the option satisfies the set of
   566  // properties. If not it will be filtered.
   567  func (iter *DistinctPropertyIterator) satisfiesProperties(option *structs.Node, set []*propertySet) bool {
   568  	for _, ps := range set {
   569  		if satisfies, reason := ps.SatisfiesDistinctProperties(option, iter.tg.Name); !satisfies {
   570  			iter.ctx.Metrics().FilterNode(option, reason)
   571  			return false
   572  		}
   573  	}
   574  
   575  	return true
   576  }
   577  
   578  func (iter *DistinctPropertyIterator) Reset() {
   579  	iter.source.Reset()
   580  
   581  	for _, ps := range iter.jobPropertySets {
   582  		ps.PopulateProposed()
   583  	}
   584  
   585  	for _, sets := range iter.groupPropertySets {
   586  		for _, ps := range sets {
   587  			ps.PopulateProposed()
   588  		}
   589  	}
   590  }
   591  
   592  // ConstraintChecker is a FeasibilityChecker which returns whether a node
   593  // matches a given set of constraints. This is used to filter on job, task
   594  // group, and task constraints.
   595  type ConstraintChecker struct {
   596  	ctx         Context
   597  	constraints []*structs.Constraint
   598  }
   599  
   600  // NewConstraintChecker creates a ConstraintChecker for a set of constraints
   601  func NewConstraintChecker(ctx Context, constraints []*structs.Constraint) *ConstraintChecker {
   602  	return &ConstraintChecker{
   603  		ctx:         ctx,
   604  		constraints: constraints,
   605  	}
   606  }
   607  
   608  func (c *ConstraintChecker) SetConstraints(constraints []*structs.Constraint) {
   609  	c.constraints = constraints
   610  }
   611  
   612  func (c *ConstraintChecker) Feasible(option *structs.Node) bool {
   613  	// Use this node if possible
   614  	for _, constraint := range c.constraints {
   615  		if !c.meetsConstraint(constraint, option) {
   616  			c.ctx.Metrics().FilterNode(option, constraint.String())
   617  			return false
   618  		}
   619  	}
   620  	return true
   621  }
   622  
   623  func (c *ConstraintChecker) meetsConstraint(constraint *structs.Constraint, option *structs.Node) bool {
   624  	// Resolve the targets. Targets that are not present are treated as `nil`.
   625  	// This is to allow for matching constraints where a target is not present.
   626  	lVal, lOk := resolveTarget(constraint.LTarget, option)
   627  	rVal, rOk := resolveTarget(constraint.RTarget, option)
   628  
   629  	// Check if satisfied
   630  	return checkConstraint(c.ctx, constraint.Operand, lVal, rVal, lOk, rOk)
   631  }
   632  
   633  // resolveTarget is used to resolve the LTarget and RTarget of a Constraint.
   634  func resolveTarget(target string, node *structs.Node) (interface{}, bool) {
   635  	// If no prefix, this must be a literal value
   636  	if !strings.HasPrefix(target, "${") {
   637  		return target, true
   638  	}
   639  
   640  	// Handle the interpolations
   641  	switch {
   642  	case "${node.unique.id}" == target:
   643  		return node.ID, true
   644  
   645  	case "${node.datacenter}" == target:
   646  		return node.Datacenter, true
   647  
   648  	case "${node.unique.name}" == target:
   649  		return node.Name, true
   650  
   651  	case "${node.class}" == target:
   652  		return node.NodeClass, true
   653  
   654  	case strings.HasPrefix(target, "${attr."):
   655  		attr := strings.TrimSuffix(strings.TrimPrefix(target, "${attr."), "}")
   656  		val, ok := node.Attributes[attr]
   657  		return val, ok
   658  
   659  	case strings.HasPrefix(target, "${meta."):
   660  		meta := strings.TrimSuffix(strings.TrimPrefix(target, "${meta."), "}")
   661  		val, ok := node.Meta[meta]
   662  		return val, ok
   663  
   664  	default:
   665  		return nil, false
   666  	}
   667  }
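
// Illustrative examples (editorial addition; the attribute and meta keys are
// hypothetical) of how targets resolve against a node:
//
//	resolveTarget("${node.datacenter}", node)  // -> node.Datacenter, true
//	resolveTarget("${attr.kernel.name}", node) // -> node.Attributes["kernel.name"]
//	resolveTarget("${meta.rack}", node)        // -> node.Meta["rack"]
//	resolveTarget("linux", node)               // no prefix: literal -> "linux", true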
   668  
   669  // checkConstraint checks if a constraint is satisfied. The lVal and rVal
   670  // interfaces may be nil.
   671  func checkConstraint(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool {
   672  	// Check for constraints not handled by this checker.
   673  	switch operand {
   674  	case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty:
   675  		return true
   676  	default:
   677  		break
   678  	}
   679  
   680  	switch operand {
   681  	case "=", "==", "is":
   682  		return lFound && rFound && reflect.DeepEqual(lVal, rVal)
   683  	case "!=", "not":
   684  		return !reflect.DeepEqual(lVal, rVal)
   685  	case "<", "<=", ">", ">=":
   686  		return lFound && rFound && checkLexicalOrder(operand, lVal, rVal)
   687  	case structs.ConstraintAttributeIsSet:
   688  		return lFound
   689  	case structs.ConstraintAttributeIsNotSet:
   690  		return !lFound
   691  	case structs.ConstraintVersion:
   692  		parser := newVersionConstraintParser(ctx)
   693  		return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal)
   694  	case structs.ConstraintSemver:
   695  		parser := newSemverConstraintParser(ctx)
   696  		return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal)
   697  	case structs.ConstraintRegex:
   698  		return lFound && rFound && checkRegexpMatch(ctx, lVal, rVal)
   699  	case structs.ConstraintSetContains, structs.ConstraintSetContainsAll:
   700  		return lFound && rFound && checkSetContainsAll(ctx, lVal, rVal)
   701  	case structs.ConstraintSetContainsAny:
   702  		return lFound && rFound && checkSetContainsAny(lVal, rVal)
   703  	default:
   704  		return false
   705  	}
   706  }
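
// Illustrative example (editorial addition): a version constraint compares a
// resolved attribute value against a go-version constraint string, e.g.:
//
//	// lVal resolved from an attribute, rVal from the constraint's RTarget
//	ok := checkConstraint(ctx, structs.ConstraintVersion, "1.8.2", ">= 1.6, < 2.0", true, true)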
   707  
   708  // checkAffinity checks if a specific affinity is satisfied
   709  func checkAffinity(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool {
   710  	return checkConstraint(ctx, operand, lVal, rVal, lFound, rFound)
   711  }
   712  
   713  // checkAttributeAffinity checks if an affinity is satisfied
   714  func checkAttributeAffinity(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool {
   715  	return checkAttributeConstraint(ctx, operand, lVal, rVal, lFound, rFound)
   716  }
   717  
   718  // checkLexicalOrder is used to check for lexical ordering
   719  func checkLexicalOrder(op string, lVal, rVal interface{}) bool {
   720  	// Ensure the values are strings
   721  	lStr, ok := lVal.(string)
   722  	if !ok {
   723  		return false
   724  	}
   725  	rStr, ok := rVal.(string)
   726  	if !ok {
   727  		return false
   728  	}
   729  
   730  	switch op {
   731  	case "<":
   732  		return lStr < rStr
   733  	case "<=":
   734  		return lStr <= rStr
   735  	case ">":
   736  		return lStr > rStr
   737  	case ">=":
   738  		return lStr >= rStr
   739  	default:
   740  		return false
   741  	}
   742  }
   743  
   744  // checkVersionMatch is used to compare a version on the
   745  // left hand side with a set of constraints on the right hand side
   746  func checkVersionMatch(ctx Context, parse verConstraintParser, lVal, rVal interface{}) bool {
   747  	// Parse the version
   748  	var versionStr string
   749  	switch v := lVal.(type) {
   750  	case string:
   751  		versionStr = v
   752  	case int:
   753  		versionStr = fmt.Sprintf("%d", v)
   754  	default:
   755  		return false
   756  	}
   757  
   758  	// Parse the version
   759  	vers, err := version.NewVersion(versionStr)
   760  	if err != nil {
   761  		return false
   762  	}
   763  
   764  	// Constraint must be a string
   765  	constraintStr, ok := rVal.(string)
   766  	if !ok {
   767  		return false
   768  	}
   769  
   770  	// Parse the constraints
   771  	constraints := parse(constraintStr)
   772  	if constraints == nil {
   773  		return false
   774  	}
   775  
   776  	// Check the constraints against the version
   777  	return constraints.Check(vers)
   778  }
   779  
   780  // checkAttributeVersionMatch is used to compare a version on the
   781  // left hand side with a set of constraints on the right hand side
   782  func checkAttributeVersionMatch(ctx Context, parse verConstraintParser, lVal, rVal *psstructs.Attribute) bool {
   783  	// Parse the version
   784  	var versionStr string
   785  	if s, ok := lVal.GetString(); ok {
   786  		versionStr = s
   787  	} else if i, ok := lVal.GetInt(); ok {
   788  		versionStr = fmt.Sprintf("%d", i)
   789  	} else {
   790  		return false
   791  	}
   792  
   793  	// Parse the version
   794  	vers, err := version.NewVersion(versionStr)
   795  	if err != nil {
   796  		return false
   797  	}
   798  
   799  	// Constraint must be a string
   800  	constraintStr, ok := rVal.GetString()
   801  	if !ok {
   802  		return false
   803  	}
   804  
   805  	// Parse the constraints
   806  	constraints := parse(constraintStr)
   807  	if constraints == nil {
   808  		return false
   809  	}
   810  
   811  	// Check the constraints against the version
   812  	return constraints.Check(vers)
   813  }
   814  
   815  // checkRegexpMatch is used to compare a value on the
   816  // left hand side with a regexp on the right hand side
   817  func checkRegexpMatch(ctx Context, lVal, rVal interface{}) bool {
   818  	// Ensure left-hand is string
   819  	lStr, ok := lVal.(string)
   820  	if !ok {
   821  		return false
   822  	}
   823  
   824  	// Regexp must be a string
   825  	regexpStr, ok := rVal.(string)
   826  	if !ok {
   827  		return false
   828  	}
   829  
   830  	// Check the cache
   831  	cache := ctx.RegexpCache()
   832  	re := cache[regexpStr]
   833  
   834  	// Parse the regexp
   835  	if re == nil {
   836  		var err error
   837  		re, err = regexp.Compile(regexpStr)
   838  		if err != nil {
   839  			return false
   840  		}
   841  		cache[regexpStr] = re
   842  	}
   843  
   844  	// Look for a match
   845  	return re.MatchString(lStr)
   846  }
   847  
   848  // checkSetContainsAll is used to see if the left hand side contains all of
   849  // the values on the right hand side
   850  func checkSetContainsAll(ctx Context, lVal, rVal interface{}) bool {
   851  	// Ensure left-hand is string
   852  	lStr, ok := lVal.(string)
   853  	if !ok {
   854  		return false
   855  	}
   856  
   857  	// RHS must be a string
   858  	rStr, ok := rVal.(string)
   859  	if !ok {
   860  		return false
   861  	}
   862  
   863  	input := strings.Split(lStr, ",")
   864  	lookup := make(map[string]struct{}, len(input))
   865  	for _, in := range input {
   866  		cleaned := strings.TrimSpace(in)
   867  		lookup[cleaned] = struct{}{}
   868  	}
   869  
   870  	for _, r := range strings.Split(rStr, ",") {
   871  		cleaned := strings.TrimSpace(r)
   872  		if _, ok := lookup[cleaned]; !ok {
   873  			return false
   874  		}
   875  	}
   876  
   877  	return true
   878  }
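
// Illustrative examples (editorial addition): both sides are parsed as
// comma-separated sets with surrounding whitespace trimmed:
//
//	checkSetContainsAll(ctx, "a,b, c", "a,c") // true: the set {a,b,c} contains a and c
//	checkSetContainsAll(ctx, "a,b", "a,d")    // false: d is missing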
   879  
   880  // checkSetContainsAny is used to see if the left hand side contains any
   881  // of the values on the right hand side
   882  func checkSetContainsAny(lVal, rVal interface{}) bool {
   883  	// Ensure left-hand is string
   884  	lStr, ok := lVal.(string)
   885  	if !ok {
   886  		return false
   887  	}
   888  
   889  	// RHS must be a string
   890  	rStr, ok := rVal.(string)
   891  	if !ok {
   892  		return false
   893  	}
   894  
   895  	input := strings.Split(lStr, ",")
   896  	lookup := make(map[string]struct{}, len(input))
   897  	for _, in := range input {
   898  		cleaned := strings.TrimSpace(in)
   899  		lookup[cleaned] = struct{}{}
   900  	}
   901  
   902  	for _, r := range strings.Split(rStr, ",") {
   903  		cleaned := strings.TrimSpace(r)
   904  		if _, ok := lookup[cleaned]; ok {
   905  			return true
   906  		}
   907  	}
   908  
   909  	return false
   910  }
   911  
   912  // FeasibilityWrapper is a FeasibleIterator which wraps both job and task group
   913  // FeasibilityCheckers, and allows feasibility checking to be skipped if the
   914  // computed node class has previously been marked as eligible or ineligible.
   915  type FeasibilityWrapper struct {
   916  	ctx         Context
   917  	source      FeasibleIterator
   918  	jobCheckers []FeasibilityChecker
   919  	tgCheckers  []FeasibilityChecker
   920  	tgAvailable []FeasibilityChecker
   921  	tg          string
   922  }
   923  
   924  // NewFeasibilityWrapper returns a FeasibleIterator based on the passed source
   925  // and FeasibilityCheckers.
   926  func NewFeasibilityWrapper(ctx Context, source FeasibleIterator,
   927  	jobCheckers, tgCheckers, tgAvailable []FeasibilityChecker) *FeasibilityWrapper {
   928  	return &FeasibilityWrapper{
   929  		ctx:         ctx,
   930  		source:      source,
   931  		jobCheckers: jobCheckers,
   932  		tgCheckers:  tgCheckers,
   933  		tgAvailable: tgAvailable,
   934  	}
   935  }
   936  
   937  func (w *FeasibilityWrapper) SetTaskGroup(tg string) {
   938  	w.tg = tg
   939  }
   940  
   941  func (w *FeasibilityWrapper) Reset() {
   942  	w.source.Reset()
   943  }
   944  
   945  // Next returns an eligible node, only running the FeasibilityCheckers as needed
   946  // based on the source's computed node class.
   947  func (w *FeasibilityWrapper) Next() *structs.Node {
   948  	evalElig := w.ctx.Eligibility()
   949  	metrics := w.ctx.Metrics()
   950  
   951  OUTER:
   952  	for {
   953  		// Get the next option from the source
   954  		option := w.source.Next()
   955  		if option == nil {
   956  			return nil
   957  		}
   958  
   959  		// Check if the job has been marked as eligible or ineligible.
   960  		jobEscaped, jobUnknown := false, false
   961  		switch evalElig.JobStatus(option.ComputedClass) {
   962  		case EvalComputedClassIneligible:
   963  			// Fast path the ineligible case
   964  			metrics.FilterNode(option, "computed class ineligible")
   965  			continue
   966  		case EvalComputedClassEscaped:
   967  			jobEscaped = true
   968  		case EvalComputedClassUnknown:
   969  			jobUnknown = true
   970  		}
   971  
   972  		// Run the job feasibility checks.
   973  		for _, check := range w.jobCheckers {
   974  			feasible := check.Feasible(option)
   975  			if !feasible {
   976  				// If the job hasn't escaped, set it to be ineligible since it
   977  				// failed a job check.
   978  				if !jobEscaped {
   979  					evalElig.SetJobEligibility(false, option.ComputedClass)
   980  				}
   981  				continue OUTER
   982  			}
   983  		}
   984  
   985  		// Set the job eligibility if the constraints weren't escaped and it
   986  		// hasn't been set before.
   987  		if !jobEscaped && jobUnknown {
   988  			evalElig.SetJobEligibility(true, option.ComputedClass)
   989  		}
   990  
   991  		// Check if the task group has been marked as eligible or ineligible.
   992  		tgEscaped, tgUnknown := false, false
   993  		switch evalElig.TaskGroupStatus(w.tg, option.ComputedClass) {
   994  		case EvalComputedClassIneligible:
   995  			// Fast path the ineligible case
   996  			metrics.FilterNode(option, "computed class ineligible")
   997  			continue
   998  		case EvalComputedClassEligible:
   999  			// Fast path the eligible case
  1000  			if w.available(option) {
  1001  				return option
  1002  			}
  1003  			// We match the class but are temporarily unavailable; the eval
  1004  			// should be blocked
  1005  			return nil
  1006  		case EvalComputedClassEscaped:
  1007  			tgEscaped = true
  1008  		case EvalComputedClassUnknown:
  1009  			tgUnknown = true
  1010  		}
  1011  
  1012  		// Run the task group feasibility checks.
  1013  		for _, check := range w.tgCheckers {
  1014  			feasible := check.Feasible(option)
  1015  			if !feasible {
  1016  				// If the task group hasn't escaped, set it to be ineligible
  1017  				// since it failed a check.
  1018  				if !tgEscaped {
  1019  					evalElig.SetTaskGroupEligibility(false, w.tg, option.ComputedClass)
  1020  				}
  1021  				continue OUTER
  1022  			}
  1023  		}
  1024  
  1025  		// Set the task group eligibility if the constraints weren't escaped and
  1026  		// it hasn't been set before.
  1027  		if !tgEscaped && tgUnknown {
  1028  			evalElig.SetTaskGroupEligibility(true, w.tg, option.ComputedClass)
  1029  		}
  1030  
  1031  		// tgAvailable checkers depend on transient node conditions, so we test
  1032  		// them without affecting the computed class
  1033  		if !w.available(option) {
  1034  			continue OUTER
  1035  		}
  1036  
  1037  		return option
  1038  	}
  1039  }
  1040  
  1041  // available checks transient feasibility checkers which depend on changing conditions,
  1042  // e.g. the health status of a plugin or driver
  1043  func (w *FeasibilityWrapper) available(option *structs.Node) bool {
  1044  	// If we don't have any availability checks, we're available
  1045  	if len(w.tgAvailable) == 0 {
  1046  		return true
  1047  	}
  1048  
  1049  	for _, check := range w.tgAvailable {
  1050  		if !check.Feasible(option) {
  1051  			return false
  1052  		}
  1053  	}
  1054  	return true
  1055  }
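
// Illustrative wiring (editorial addition): checkers whose result depends on
// transient node state, such as plugin health, can be passed as tgAvailable so
// a temporary failure does not poison the computed-class cache:
//
//	wrapper := NewFeasibilityWrapper(ctx, source,
//		jobCheckers, // cached per computed class at the job level
//		tgCheckers,  // cached per computed class at the task group level
//		[]FeasibilityChecker{csiVolumeChecker}) // re-evaluated for every node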
  1056  
  1057  // DeviceChecker is a FeasibilityChecker which returns whether a node has the
  1058  // devices necessary to schedule a task group.
  1059  type DeviceChecker struct {
  1060  	ctx Context
  1061  
  1062  	// required is the set of requested devices that must exist on the node
  1063  	required []*structs.RequestedDevice
  1064  
  1065  	// requiresDevices indicates if the task group requires devices
  1066  	requiresDevices bool
  1067  }
  1068  
  1069  // NewDeviceChecker creates a DeviceChecker
  1070  func NewDeviceChecker(ctx Context) *DeviceChecker {
  1071  	return &DeviceChecker{
  1072  		ctx: ctx,
  1073  	}
  1074  }
  1075  
  1076  func (c *DeviceChecker) SetTaskGroup(tg *structs.TaskGroup) {
  1077  	c.required = nil
  1078  	for _, task := range tg.Tasks {
  1079  		c.required = append(c.required, task.Resources.Devices...)
  1080  	}
  1081  	c.requiresDevices = len(c.required) != 0
  1082  }
  1083  
  1084  func (c *DeviceChecker) Feasible(option *structs.Node) bool {
  1085  	if c.hasDevices(option) {
  1086  		return true
  1087  	}
  1088  
  1089  	c.ctx.Metrics().FilterNode(option, FilterConstraintDevices)
  1090  	return false
  1091  }
  1092  
  1093  func (c *DeviceChecker) hasDevices(option *structs.Node) bool {
  1094  	if !c.requiresDevices {
  1095  		return true
  1096  	}
  1097  
  1098  	// COMPAT(0.11): Remove in 0.11
  1099  	// The node does not have the new resources object so it can not have any
  1100  	// devices
  1101  	if option.NodeResources == nil {
  1102  		return false
  1103  	}
  1104  
  1105  	// Check if the node has any devices
  1106  	nodeDevs := option.NodeResources.Devices
  1107  	if len(nodeDevs) == 0 {
  1108  		return false
  1109  	}
  1110  
  1111  	// Create a mapping of node devices to the remaining count
  1112  	available := make(map[*structs.NodeDeviceResource]uint64, len(nodeDevs))
  1113  	for _, d := range nodeDevs {
  1114  		var healthy uint64 = 0
  1115  		for _, instance := range d.Instances {
  1116  			if instance.Healthy {
  1117  				healthy++
  1118  			}
  1119  		}
  1120  		if healthy != 0 {
  1121  			available[d] = healthy
  1122  		}
  1123  	}
  1124  
  1125  	// Go through the required devices trying to find matches
  1126  OUTER:
  1127  	for _, req := range c.required {
  1128  		// Determine how many there are to place
  1129  		desiredCount := req.Count
  1130  
  1131  		// Go through the device resources and see if we have a match
  1132  		for d, unused := range available {
  1133  			if unused == 0 {
  1134  				// Depleted
  1135  				continue
  1136  			}
  1137  
  1138  			// First check we have enough instances of the device since this is
  1139  			// cheaper than checking the constraints
  1140  			if unused < desiredCount {
  1141  				continue
  1142  			}
  1143  
  1144  			// Check the constraints
  1145  			if nodeDeviceMatches(c.ctx, d, req) {
  1146  				// Consume the instances
  1147  				available[d] -= desiredCount
  1148  
  1149  				// Move on to the next request
  1150  				continue OUTER
  1151  			}
  1152  		}
  1153  
  1154  		// We couldn't match the request for the device
  1155  		return false
  1156  	}
  1157  
  1158  	// Only satisfied if there are no more devices to place
  1159  	return true
  1160  }
  1161  
  1162  // nodeDeviceMatches checks if the device matches the request and its
  1163  // constraints. It doesn't check the count.
  1164  func nodeDeviceMatches(ctx Context, d *structs.NodeDeviceResource, req *structs.RequestedDevice) bool {
  1165  	if !d.ID().Matches(req.ID()) {
  1166  		return false
  1167  	}
  1168  
  1169  	// There are no constraints to consider
  1170  	if len(req.Constraints) == 0 {
  1171  		return true
  1172  	}
  1173  
  1174  	for _, c := range req.Constraints {
  1175  		// Resolve the targets
  1176  		lVal, lOk := resolveDeviceTarget(c.LTarget, d)
  1177  		rVal, rOk := resolveDeviceTarget(c.RTarget, d)
  1178  
  1179  		// Check if satisfied
  1180  		if !checkAttributeConstraint(ctx, c.Operand, lVal, rVal, lOk, rOk) {
  1181  			return false
  1182  		}
  1183  	}
  1184  
  1185  	return true
  1186  }
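
// Illustrative example (editorial addition): a request for two GPUs constrained
// on a device attribute; this function checks the ID and constraints, while the
// count is handled by hasDevices:
//
//	req := &structs.RequestedDevice{
//		Name:  "nvidia/gpu",
//		Count: 2,
//		Constraints: []*structs.Constraint{{
//			LTarget: "${device.attr.memory}",
//			Operand: ">=",
//			RTarget: "8 GiB",
//		}},
//	}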
  1187  
  1188  // resolveDeviceTarget is used to resolve the LTarget and RTarget of a Constraint
  1189  // when used on a device
  1190  func resolveDeviceTarget(target string, d *structs.NodeDeviceResource) (*psstructs.Attribute, bool) {
  1191  	// If no prefix, this must be a literal value
  1192  	if !strings.HasPrefix(target, "${") {
  1193  		return psstructs.ParseAttribute(target), true
  1194  	}
  1195  
  1196  	// Handle the interpolations
  1197  	switch {
  1198  	case "${device.model}" == target:
  1199  		return psstructs.NewStringAttribute(d.Name), true
  1200  
  1201  	case "${device.vendor}" == target:
  1202  		return psstructs.NewStringAttribute(d.Vendor), true
  1203  
  1204  	case "${device.type}" == target:
  1205  		return psstructs.NewStringAttribute(d.Type), true
  1206  
  1207  	case strings.HasPrefix(target, "${device.attr."):
  1208  		attr := strings.TrimPrefix(target, "${device.attr.")
  1209  		attr = strings.TrimSuffix(attr, "}")
  1210  		val, ok := d.Attributes[attr]
  1211  		return val, ok
  1212  
  1213  	default:
  1214  		return nil, false
  1215  	}
  1216  }
  1217  
  1218  // checkAttributeConstraint checks if a constraint is satisfied. nil equality
  1219  // comparisons are considered to be false.
  1220  func checkAttributeConstraint(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool {
  1221  	// Check for constraints not handled by this checker.
  1222  	switch operand {
  1223  	case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty:
  1224  		return true
  1225  	default:
  1226  		break
  1227  	}
  1228  
  1229  	switch operand {
  1230  	case "!=", "not":
  1231  		// Neither value was provided, nil != nil == false
  1232  		if !(lFound || rFound) {
  1233  			return false
  1234  		}
  1235  
  1236  		// Only 1 value was provided, therefore nil != some == true
  1237  		if lFound != rFound {
  1238  			return true
  1239  		}
  1240  
  1241  		// Both values were provided, so actually compare them
  1242  		v, ok := lVal.Compare(rVal)
  1243  		if !ok {
  1244  			return false
  1245  		}
  1246  
  1247  		return v != 0
  1248  
  1249  	case "<", "<=", ">", ">=", "=", "==", "is":
  1250  		if !(lFound && rFound) {
  1251  			return false
  1252  		}
  1253  
  1254  		v, ok := lVal.Compare(rVal)
  1255  		if !ok {
  1256  			return false
  1257  		}
  1258  
  1259  		switch operand {
  1260  		case "is", "==", "=":
  1261  			return v == 0
  1262  		case "<":
  1263  			return v == -1
  1264  		case "<=":
  1265  			return v != 1
  1266  		case ">":
  1267  			return v == 1
  1268  		case ">=":
  1269  			return v != -1
  1270  		default:
  1271  			return false
  1272  		}
  1273  
  1274  	case structs.ConstraintVersion:
  1275  		if !(lFound && rFound) {
  1276  			return false
  1277  		}
  1278  
  1279  		parser := newVersionConstraintParser(ctx)
  1280  		return checkAttributeVersionMatch(ctx, parser, lVal, rVal)
  1281  
  1282  	case structs.ConstraintSemver:
  1283  		if !(lFound && rFound) {
  1284  			return false
  1285  		}
  1286  
  1287  		parser := newSemverConstraintParser(ctx)
  1288  		return checkAttributeVersionMatch(ctx, parser, lVal, rVal)
  1289  
  1290  	case structs.ConstraintRegex:
  1291  		if !(lFound && rFound) {
  1292  			return false
  1293  		}
  1294  
  1295  		ls, ok := lVal.GetString()
  1296  		rs, ok2 := rVal.GetString()
  1297  		if !ok || !ok2 {
  1298  			return false
  1299  		}
  1300  		return checkRegexpMatch(ctx, ls, rs)
  1301  	case structs.ConstraintSetContains, structs.ConstraintSetContainsAll:
  1302  		if !(lFound && rFound) {
  1303  			return false
  1304  		}
  1305  
  1306  		ls, ok := lVal.GetString()
  1307  		rs, ok2 := rVal.GetString()
  1308  		if !ok || !ok2 {
  1309  			return false
  1310  		}
  1311  
  1312  		return checkSetContainsAll(ctx, ls, rs)
  1313  	case structs.ConstraintSetContainsAny:
  1314  		if !(lFound && rFound) {
  1315  			return false
  1316  		}
  1317  
  1318  		ls, ok := lVal.GetString()
  1319  		rs, ok2 := rVal.GetString()
  1320  		if !ok || !ok2 {
  1321  			return false
  1322  		}
  1323  
  1324  		return checkSetContainsAny(ls, rs)
  1325  	case structs.ConstraintAttributeIsSet:
  1326  		return lFound
  1327  	case structs.ConstraintAttributeIsNotSet:
  1328  		return !lFound
  1329  	default:
  1330  		return false
  1331  	}
  1332  
  1333  }
  1334  
  1335  // VerConstraints is the interface implemented by both go-version constraints
  1336  // and semver constraints.
  1337  type VerConstraints interface {
  1338  	Check(v *version.Version) bool
  1339  	String() string
  1340  }
  1341  
  1342  // verConstraintParser parses a constraint string into a version constraints
  1343  // implementation (go-version or semver).
  1344  type verConstraintParser func(verConstraint string) VerConstraints
  1345  
  1346  func newVersionConstraintParser(ctx Context) verConstraintParser {
  1347  	cache := ctx.VersionConstraintCache()
  1348  
  1349  	return func(cstr string) VerConstraints {
  1350  		if c := cache[cstr]; c != nil {
  1351  			return c
  1352  		}
  1353  
  1354  		constraints, err := version.NewConstraint(cstr)
  1355  		if err != nil {
  1356  			return nil
  1357  		}
  1358  		cache[cstr] = constraints
  1359  
  1360  		return constraints
  1361  	}
  1362  }
  1363  
  1364  func newSemverConstraintParser(ctx Context) verConstraintParser {
  1365  	cache := ctx.SemverConstraintCache()
  1366  
  1367  	return func(cstr string) VerConstraints {
  1368  		if c := cache[cstr]; c != nil {
  1369  			return c
  1370  		}
  1371  
  1372  		constraints, err := semver.NewConstraint(cstr)
  1373  		if err != nil {
  1374  			return nil
  1375  		}
  1376  		cache[cstr] = constraints
  1377  
  1378  		return constraints
  1379  	}
  1380  }