github.com/uchennaokeke444/nomad@v0.11.8/scheduler/stack_oss.go

// +build !ent

package scheduler

// NewGenericStack constructs a stack used for selecting service placements
func NewGenericStack(batch bool, ctx Context) *GenericStack {
	// Create a new stack
	s := &GenericStack{
		batch: batch,
		ctx:   ctx,
	}

	// Create the source iterator. We randomize the order we visit nodes
	// to reduce collisions between schedulers and to do basic load
	// balancing across eligible nodes.
	s.source = NewRandomIterator(ctx, nil)

	// Create the quota iterator to determine if a placement would cause the
	// quota attached to the job's namespace to be exceeded.
	s.quota = NewQuotaIterator(ctx, s.source)

	// Attach the job constraints. The job is filled in later.
	s.jobConstraint = NewConstraintChecker(ctx, nil)

	// Filter on task group drivers first as they are faster
	s.taskGroupDrivers = NewDriverChecker(ctx, nil)

	// Filter on task group constraints second
	s.taskGroupConstraint = NewConstraintChecker(ctx, nil)

	// Filter on task group devices
	s.taskGroupDevices = NewDeviceChecker(ctx)

	// Filter on task group host volumes
	s.taskGroupHostVolumes = NewHostVolumeChecker(ctx)

	// Filter on available, healthy CSI plugins
	s.taskGroupCSIVolumes = NewCSIVolumeChecker(ctx)

	// Create the feasibility wrapper, which wraps all feasibility checks so
	// that checking can be skipped if the computed node class has previously
	// been marked as eligible or ineligible. Generally these are checks that
	// only need to examine a single node to determine feasibility.
	jobs := []FeasibilityChecker{s.jobConstraint}
	tgs := []FeasibilityChecker{s.taskGroupDrivers,
		s.taskGroupConstraint,
		s.taskGroupHostVolumes,
		s.taskGroupDevices}
	avail := []FeasibilityChecker{s.taskGroupCSIVolumes}
	s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs, avail)

	// Filter on distinct host constraints.
	s.distinctHostsConstraint = NewDistinctHostsIterator(ctx, s.wrappedChecks)

	// Filter on distinct property constraints.
	s.distinctPropertyConstraint = NewDistinctPropertyIterator(ctx, s.distinctHostsConstraint)

	// Upgrade from feasible to rank iterator
	rankSource := NewFeasibleRankIterator(ctx, s.distinctPropertyConstraint)

	// Apply the bin packing; this depends on the resources needed
	// by a particular task group.
	_, schedConfig, _ := s.ctx.State().SchedulerConfig()
	schedulerAlgorithm := schedConfig.EffectiveSchedulerAlgorithm()

	s.binPack = NewBinPackIterator(ctx, rankSource, false, 0, schedulerAlgorithm)

	// Apply the job anti-affinity iterator. This is to avoid placing
	// multiple allocations on the same node for this job.
	s.jobAntiAff = NewJobAntiAffinityIterator(ctx, s.binPack, "")

	// Apply the node rescheduling penalty. This tries to avoid placing on a
	// node where the allocation failed previously.
	s.nodeReschedulingPenalty = NewNodeReschedulingPenaltyIterator(ctx, s.jobAntiAff)

	// Apply scores based on the affinity stanza
	s.nodeAffinity = NewNodeAffinityIterator(ctx, s.nodeReschedulingPenalty)

	// Apply scores based on the spread stanza
	s.spread = NewSpreadIterator(ctx, s.nodeAffinity)

	// Normalize scores by averaging them across the various scorers
	s.scoreNorm = NewScoreNormalizationIterator(ctx, s.spread)

	// Apply a limit function. This is to avoid scanning *every* possible node.
	s.limit = NewLimitIterator(ctx, s.scoreNorm, 2, skipScoreThreshold, maxSkip)

	// Select the node with the maximum score for placement
	s.maxScore = NewMaxScoreIterator(ctx, s.limit)
	return s
}
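
// A minimal usage sketch (not part of the original file): callers such as the
// generic scheduler typically configure the stack and then select a node per
// task group. The SetNodes, SetJob, and Select methods are assumed from the
// surrounding scheduler package; the variables below are hypothetical.
//
//	stack := NewGenericStack(false, ctx)
//	stack.SetNodes(eligibleNodes) // candidate nodes for this evaluation
//	stack.SetJob(job)             // fills in the job-level constraints attached above
//	option := stack.Select(taskGroup, &SelectOptions{})
//	if option != nil {
//		// option.Node is the highest-scoring feasible node for taskGroup
//	}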