github.com/uchennaokeke444/nomad@v0.11.8/scheduler/testing.go

package scheduler

import (
	"fmt"
	"sync"
	"time"

	testing "github.com/mitchellh/go-testing-interface"
	"github.com/stretchr/testify/require"

	"github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

// RejectPlan is used to always reject the entire plan and force a state refresh
type RejectPlan struct {
	Harness *Harness
}

func (r *RejectPlan) SubmitPlan(*structs.Plan) (*structs.PlanResult, State, error) {
	result := new(structs.PlanResult)
	result.RefreshIndex = r.Harness.NextIndex()
	return result, r.Harness.State, nil
}

func (r *RejectPlan) UpdateEval(eval *structs.Evaluation) error {
	return nil
}

func (r *RejectPlan) CreateEval(*structs.Evaluation) error {
	return nil
}

func (r *RejectPlan) ReblockEval(*structs.Evaluation) error {
	return nil
}

// Harness is a lightweight testing harness for schedulers. It manages a state
// store copy and provides the planner interface. It can be extended for various
// testing uses or for invoking the scheduler without side effects.
type Harness struct {
	t     testing.T
	State *state.StateStore

	Planner  Planner
	planLock sync.Mutex

	Plans        []*structs.Plan
	Evals        []*structs.Evaluation
	CreateEvals  []*structs.Evaluation
	ReblockEvals []*structs.Evaluation

	nextIndex     uint64
	nextIndexLock sync.Mutex

	optimizePlan bool
}

// NewHarness is used to make a new testing harness
func NewHarness(t testing.T) *Harness {
	state := state.TestStateStore(t)
	h := &Harness{
		t:         t,
		State:     state,
		nextIndex: 1,
	}
	return h
}

// NewHarnessWithState creates a new harness with the given state for testing
// purposes.
func NewHarnessWithState(t testing.T, state *state.StateStore) *Harness {
	return &Harness{
		t:         t,
		State:     state,
		nextIndex: 1,
	}
}
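// A minimal usage sketch of the harness (assumptions: a scheduler factory
// such as NewServiceScheduler from this package, and test fixtures from
// github.com/hashicorp/nomad/nomad/mock; adapt to the test at hand):
//
//	h := NewHarness(t)
//	job := mock.Job()
//	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
//
//	eval := mock.Eval()
//	eval.JobID = job.ID
//	require.NoError(t, h.Process(NewServiceScheduler, eval))
//	h.AssertEvalStatus(t, structs.EvalStatusComplete)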
// SubmitPlan is used to handle plan submission
func (h *Harness) SubmitPlan(plan *structs.Plan) (*structs.PlanResult, State, error) {
	// Ensure sequential plan application
	h.planLock.Lock()
	defer h.planLock.Unlock()

	// Store the plan
	h.Plans = append(h.Plans, plan)

	// Check for custom planner
	if h.Planner != nil {
		return h.Planner.SubmitPlan(plan)
	}

	// Get the index
	index := h.NextIndex()

	// Prepare the result
	result := new(structs.PlanResult)
	result.NodeUpdate = plan.NodeUpdate
	result.NodeAllocation = plan.NodeAllocation
	result.NodePreemptions = plan.NodePreemptions
	result.AllocIndex = index

	// Flatten evicts and allocs
	now := time.Now().UTC().UnixNano()

	allocsUpdated := make([]*structs.Allocation, 0, len(result.NodeAllocation))
	for _, allocList := range plan.NodeAllocation {
		allocsUpdated = append(allocsUpdated, allocList...)
	}
	updateCreateTimestamp(allocsUpdated, now)

	// Setup the update request
	req := structs.ApplyPlanResultsRequest{
		AllocUpdateRequest: structs.AllocUpdateRequest{
			Job: plan.Job,
		},
		Deployment:        plan.Deployment,
		DeploymentUpdates: plan.DeploymentUpdates,
		EvalID:            plan.EvalID,
	}

	if h.optimizePlan {
		stoppedAllocDiffs := make([]*structs.AllocationDiff, 0, len(result.NodeUpdate))
		for _, updateList := range plan.NodeUpdate {
			for _, stoppedAlloc := range updateList {
				stoppedAllocDiffs = append(stoppedAllocDiffs, stoppedAlloc.AllocationDiff())
			}
		}
		req.AllocsStopped = stoppedAllocDiffs

		req.AllocsUpdated = allocsUpdated

		preemptedAllocDiffs := make([]*structs.AllocationDiff, 0, len(result.NodePreemptions))
		for _, preemptions := range plan.NodePreemptions {
			for _, preemptedAlloc := range preemptions {
				allocDiff := preemptedAlloc.AllocationDiff()
				allocDiff.ModifyTime = now
				preemptedAllocDiffs = append(preemptedAllocDiffs, allocDiff)
			}
		}
		req.AllocsPreempted = preemptedAllocDiffs
	} else {
		// COMPAT 0.11: Handles unoptimized log format
		var allocs []*structs.Allocation

		allocsStopped := make([]*structs.Allocation, 0, len(result.NodeUpdate))
		for _, updateList := range plan.NodeUpdate {
			allocsStopped = append(allocsStopped, updateList...)
		}
		allocs = append(allocs, allocsStopped...)

		allocs = append(allocs, allocsUpdated...)
		updateCreateTimestamp(allocs, now)

		req.Alloc = allocs

		// Set modify time for preempted allocs and flatten them
		var preemptedAllocs []*structs.Allocation
		for _, preemptions := range result.NodePreemptions {
			for _, alloc := range preemptions {
				alloc.ModifyTime = now
				preemptedAllocs = append(preemptedAllocs, alloc)
			}
		}

		req.NodePreemptions = preemptedAllocs
	}

	// Apply the full plan
	err := h.State.UpsertPlanResults(index, &req)
	return result, nil, err
}

// OptimizePlan is used only by the Harness to set the optimizePlan field,
// since the Harness does not have access to a Server object
func (h *Harness) OptimizePlan(optimize bool) {
	h.optimizePlan = optimize
}

func updateCreateTimestamp(allocations []*structs.Allocation, now int64) {
	// Set the time the alloc was applied for the first time. This can be used
	// to approximate the scheduling time.
	for _, alloc := range allocations {
		if alloc.CreateTime == 0 {
			alloc.CreateTime = now
		}
	}
}

func (h *Harness) UpdateEval(eval *structs.Evaluation) error {
	// Ensure sequential plan application
	h.planLock.Lock()
	defer h.planLock.Unlock()

	// Store the eval
	h.Evals = append(h.Evals, eval)

	// Check for custom planner
	if h.Planner != nil {
		return h.Planner.UpdateEval(eval)
	}
	return nil
}

func (h *Harness) CreateEval(eval *structs.Evaluation) error {
	// Ensure sequential plan application
	h.planLock.Lock()
	defer h.planLock.Unlock()

	// Store the eval
	h.CreateEvals = append(h.CreateEvals, eval)

	// Check for custom planner
	if h.Planner != nil {
		return h.Planner.CreateEval(eval)
	}
	return nil
}
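// The exported Planner field lets a test intercept plan and eval submission
// before the harness's default handling runs. A minimal sketch using the
// RejectPlan planner defined above, which rejects every plan and forces a
// state refresh (illustrative only):
//
//	h := NewHarness(t)
//	h.Planner = &RejectPlan{h}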
func (h *Harness) ReblockEval(eval *structs.Evaluation) error {
	// Ensure sequential plan application
	h.planLock.Lock()
	defer h.planLock.Unlock()

	// Check that the evaluation was already blocked.
	ws := memdb.NewWatchSet()
	old, err := h.State.EvalByID(ws, eval.ID)
	if err != nil {
		return err
	}

	if old == nil {
		return fmt.Errorf("evaluation does not exist to be reblocked")
	}
	if old.Status != structs.EvalStatusBlocked {
		return fmt.Errorf("evaluation %q is not already in a blocked state", old.ID)
	}

	h.ReblockEvals = append(h.ReblockEvals, eval)
	return nil
}

// NextIndex returns the next index
func (h *Harness) NextIndex() uint64 {
	h.nextIndexLock.Lock()
	defer h.nextIndexLock.Unlock()
	idx := h.nextIndex
	h.nextIndex += 1
	return idx
}

// Snapshot is used to snapshot the current state
func (h *Harness) Snapshot() State {
	snap, _ := h.State.Snapshot()
	return snap
}

// Scheduler is used to return a new scheduler from
// a snapshot of current state using the harness for planning.
func (h *Harness) Scheduler(factory Factory) Scheduler {
	logger := testlog.HCLogger(h.t)
	return factory(logger, h.Snapshot(), h)
}

// Process is used to process an evaluation given a factory
// function to create the scheduler
func (h *Harness) Process(factory Factory, eval *structs.Evaluation) error {
	sched := h.Scheduler(factory)
	return sched.Process(eval)
}

func (h *Harness) AssertEvalStatus(t testing.T, state string) {
	require.Len(t, h.Evals, 1)
	update := h.Evals[0]
	require.Equal(t, state, update.Status)
}
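// After a Process call, tests can assert against the artifacts the harness
// captured: Plans, Evals, CreateEvals, and ReblockEvals. Continuing the
// usage sketch above (illustrative only; expected counts depend on the job
// under test):
//
//	require.Len(t, h.Plans, 1)
//	plan := h.Plans[0]
//	require.NotEmpty(t, plan.NodeAllocation)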