github.com/hernad/nomad@v1.6.112/nomad/state/state_store_test.go (about) 1 // Copyright (c) HashiCorp, Inc. 2 // SPDX-License-Identifier: MPL-2.0 3 4 package state 5 6 import ( 7 "context" 8 "fmt" 9 "reflect" 10 "sort" 11 "strconv" 12 "strings" 13 "testing" 14 "time" 15 16 "github.com/hashicorp/go-memdb" 17 "github.com/hernad/nomad/ci" 18 "github.com/hernad/nomad/helper/pointer" 19 "github.com/hernad/nomad/helper/uuid" 20 "github.com/hernad/nomad/nomad/mock" 21 "github.com/hernad/nomad/nomad/structs" 22 "github.com/kr/pretty" 23 "github.com/shoenig/test/must" 24 "github.com/stretchr/testify/assert" 25 "github.com/stretchr/testify/require" 26 ) 27 28 func testStateStore(t *testing.T) *StateStore { 29 return TestStateStore(t) 30 } 31 32 func TestStateStore_Blocking_Error(t *testing.T) { 33 ci.Parallel(t) 34 35 expected := fmt.Errorf("test error") 36 errFn := func(memdb.WatchSet, *StateStore) (interface{}, uint64, error) { 37 return nil, 0, expected 38 } 39 40 state := testStateStore(t) 41 _, idx, err := state.BlockingQuery(errFn, 10, context.Background()) 42 assert.EqualError(t, err, expected.Error()) 43 assert.Zero(t, idx) 44 } 45 46 func TestStateStore_Blocking_Timeout(t *testing.T) { 47 ci.Parallel(t) 48 49 noopFn := func(memdb.WatchSet, *StateStore) (interface{}, uint64, error) { 50 return nil, 5, nil 51 } 52 53 state := testStateStore(t) 54 timeout := time.Now().Add(250 * time.Millisecond) 55 deadlineCtx, cancel := context.WithDeadline(context.Background(), timeout) 56 defer cancel() 57 58 _, idx, err := state.BlockingQuery(noopFn, 10, deadlineCtx) 59 assert.EqualError(t, err, context.DeadlineExceeded.Error()) 60 assert.EqualValues(t, 5, idx) 61 assert.WithinDuration(t, timeout, time.Now(), 100*time.Millisecond) 62 } 63 64 func TestStateStore_Blocking_MinQuery(t *testing.T) { 65 ci.Parallel(t) 66 67 node := mock.Node() 68 count := 0 69 queryFn := func(ws memdb.WatchSet, s *StateStore) (interface{}, uint64, error) { 70 _, err := s.NodeByID(ws, node.ID) 71 if err 
!= nil { 72 return nil, 0, err 73 } 74 75 count++ 76 if count == 1 { 77 return false, 5, nil 78 } else if count > 2 { 79 return false, 20, fmt.Errorf("called too many times") 80 } 81 82 return true, 11, nil 83 } 84 85 state := testStateStore(t) 86 timeout := time.Now().Add(100 * time.Millisecond) 87 deadlineCtx, cancel := context.WithDeadline(context.Background(), timeout) 88 defer cancel() 89 90 time.AfterFunc(5*time.Millisecond, func() { 91 state.UpsertNode(structs.MsgTypeTestSetup, 11, node) 92 }) 93 94 resp, idx, err := state.BlockingQuery(queryFn, 10, deadlineCtx) 95 if assert.Nil(t, err) { 96 assert.Equal(t, 2, count) 97 assert.EqualValues(t, 11, idx) 98 assert.True(t, resp.(bool)) 99 } 100 } 101 102 // COMPAT 0.11: Uses AllocUpdateRequest.Alloc 103 // This test checks that: 104 // 1) The job is denormalized 105 // 2) Allocations are created 106 func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing.T) { 107 ci.Parallel(t) 108 109 state := testStateStore(t) 110 alloc := mock.Alloc() 111 job := alloc.Job 112 alloc.Job = nil 113 114 if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job); err != nil { 115 t.Fatalf("err: %v", err) 116 } 117 118 eval := mock.Eval() 119 eval.JobID = job.ID 120 121 // Create an eval 122 if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}); err != nil { 123 t.Fatalf("err: %v", err) 124 } 125 126 // Create a plan result 127 res := structs.ApplyPlanResultsRequest{ 128 AllocUpdateRequest: structs.AllocUpdateRequest{ 129 Alloc: []*structs.Allocation{alloc}, 130 Job: job, 131 }, 132 EvalID: eval.ID, 133 } 134 assert := assert.New(t) 135 err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) 136 assert.Nil(err) 137 138 ws := memdb.NewWatchSet() 139 out, err := state.AllocByID(ws, alloc.ID) 140 assert.Nil(err) 141 assert.Equal(alloc, out) 142 143 index, err := state.Index("allocs") 144 assert.Nil(err) 145 assert.EqualValues(1000, index) 146 147 if 
watchFired(ws) { 148 t.Fatalf("bad") 149 } 150 151 evalOut, err := state.EvalByID(ws, eval.ID) 152 assert.Nil(err) 153 assert.NotNil(evalOut) 154 assert.EqualValues(1000, evalOut.ModifyIndex) 155 } 156 157 // This test checks that: 158 // 1) The job is denormalized 159 // 2) Allocations are denormalized and updated with the diff 160 // That stopped allocs Job is unmodified 161 func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { 162 ci.Parallel(t) 163 164 state := testStateStore(t) 165 alloc := mock.Alloc() 166 job := alloc.Job 167 alloc.Job = nil 168 169 stoppedAlloc := mock.Alloc() 170 stoppedAlloc.Job = job 171 stoppedAllocDiff := &structs.AllocationDiff{ 172 ID: stoppedAlloc.ID, 173 DesiredDescription: "desired desc", 174 ClientStatus: structs.AllocClientStatusLost, 175 } 176 preemptedAlloc := mock.Alloc() 177 preemptedAlloc.Job = job 178 preemptedAllocDiff := &structs.AllocationDiff{ 179 ID: preemptedAlloc.ID, 180 PreemptedByAllocation: alloc.ID, 181 } 182 183 require := require.New(t) 184 require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 900, []*structs.Allocation{stoppedAlloc, preemptedAlloc})) 185 require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job)) 186 187 // modify job and ensure that stopped and preempted alloc point to original Job 188 mJob := job.Copy() 189 mJob.TaskGroups[0].Name = "other" 190 191 require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, mJob)) 192 193 eval := mock.Eval() 194 eval.JobID = job.ID 195 196 // Create an eval 197 require.NoError(state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval})) 198 199 // Create a plan result 200 res := structs.ApplyPlanResultsRequest{ 201 AllocUpdateRequest: structs.AllocUpdateRequest{ 202 AllocsUpdated: []*structs.Allocation{alloc}, 203 AllocsStopped: []*structs.AllocationDiff{stoppedAllocDiff}, 204 Job: mJob, 205 }, 206 EvalID: eval.ID, 207 AllocsPreempted: []*structs.AllocationDiff{preemptedAllocDiff}, 208 } 
209 assert := assert.New(t) 210 planModifyIndex := uint64(1000) 211 err := state.UpsertPlanResults(structs.MsgTypeTestSetup, planModifyIndex, &res) 212 require.NoError(err) 213 214 ws := memdb.NewWatchSet() 215 out, err := state.AllocByID(ws, alloc.ID) 216 require.NoError(err) 217 assert.Equal(alloc, out) 218 219 outJob, err := state.JobByID(ws, job.Namespace, job.ID) 220 require.NoError(err) 221 require.Equal(mJob.TaskGroups, outJob.TaskGroups) 222 require.NotEmpty(job.TaskGroups, outJob.TaskGroups) 223 224 updatedStoppedAlloc, err := state.AllocByID(ws, stoppedAlloc.ID) 225 require.NoError(err) 226 assert.Equal(stoppedAllocDiff.DesiredDescription, updatedStoppedAlloc.DesiredDescription) 227 assert.Equal(structs.AllocDesiredStatusStop, updatedStoppedAlloc.DesiredStatus) 228 assert.Equal(stoppedAllocDiff.ClientStatus, updatedStoppedAlloc.ClientStatus) 229 assert.Equal(planModifyIndex, updatedStoppedAlloc.AllocModifyIndex) 230 assert.Equal(planModifyIndex, updatedStoppedAlloc.AllocModifyIndex) 231 assert.Equal(job.TaskGroups, updatedStoppedAlloc.Job.TaskGroups) 232 233 updatedPreemptedAlloc, err := state.AllocByID(ws, preemptedAlloc.ID) 234 require.NoError(err) 235 assert.Equal(structs.AllocDesiredStatusEvict, updatedPreemptedAlloc.DesiredStatus) 236 assert.Equal(preemptedAllocDiff.PreemptedByAllocation, updatedPreemptedAlloc.PreemptedByAllocation) 237 assert.Equal(planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex) 238 assert.Equal(planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex) 239 assert.Equal(job.TaskGroups, updatedPreemptedAlloc.Job.TaskGroups) 240 241 index, err := state.Index("allocs") 242 require.NoError(err) 243 assert.EqualValues(planModifyIndex, index) 244 245 require.False(watchFired(ws)) 246 247 evalOut, err := state.EvalByID(ws, eval.ID) 248 require.NoError(err) 249 require.NotNil(evalOut) 250 assert.EqualValues(planModifyIndex, evalOut.ModifyIndex) 251 252 } 253 254 // This test checks that the deployment is created and allocations count 
towards 255 // the deployment 256 func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { 257 ci.Parallel(t) 258 259 state := testStateStore(t) 260 alloc := mock.Alloc() 261 alloc2 := mock.Alloc() 262 job := alloc.Job 263 alloc.Job = nil 264 alloc2.Job = nil 265 266 d := mock.Deployment() 267 alloc.DeploymentID = d.ID 268 alloc2.DeploymentID = d.ID 269 270 if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job); err != nil { 271 t.Fatalf("err: %v", err) 272 } 273 274 eval := mock.Eval() 275 eval.JobID = job.ID 276 277 // Create an eval 278 if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}); err != nil { 279 t.Fatalf("err: %v", err) 280 } 281 282 // Create a plan result 283 res := structs.ApplyPlanResultsRequest{ 284 AllocUpdateRequest: structs.AllocUpdateRequest{ 285 Alloc: []*structs.Allocation{alloc, alloc2}, 286 Job: job, 287 }, 288 Deployment: d, 289 EvalID: eval.ID, 290 } 291 292 err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) 293 if err != nil { 294 t.Fatalf("err: %v", err) 295 } 296 297 ws := memdb.NewWatchSet() 298 assert := assert.New(t) 299 out, err := state.AllocByID(ws, alloc.ID) 300 assert.Nil(err) 301 assert.Equal(alloc, out) 302 303 dout, err := state.DeploymentByID(ws, d.ID) 304 assert.Nil(err) 305 assert.NotNil(dout) 306 307 tg, ok := dout.TaskGroups[alloc.TaskGroup] 308 assert.True(ok) 309 assert.NotNil(tg) 310 assert.Equal(2, tg.PlacedAllocs) 311 312 evalOut, err := state.EvalByID(ws, eval.ID) 313 assert.Nil(err) 314 assert.NotNil(evalOut) 315 assert.EqualValues(1000, evalOut.ModifyIndex) 316 317 if watchFired(ws) { 318 t.Fatalf("bad") 319 } 320 321 // Update the allocs to be part of a new deployment 322 d2 := d.Copy() 323 d2.ID = uuid.Generate() 324 325 allocNew := alloc.Copy() 326 allocNew.DeploymentID = d2.ID 327 allocNew2 := alloc2.Copy() 328 allocNew2.DeploymentID = d2.ID 329 330 // Create another plan 331 res = structs.ApplyPlanResultsRequest{ 332 
AllocUpdateRequest: structs.AllocUpdateRequest{ 333 Alloc: []*structs.Allocation{allocNew, allocNew2}, 334 Job: job, 335 }, 336 Deployment: d2, 337 EvalID: eval.ID, 338 } 339 340 err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1001, &res) 341 if err != nil { 342 t.Fatalf("err: %v", err) 343 } 344 345 dout, err = state.DeploymentByID(ws, d2.ID) 346 assert.Nil(err) 347 assert.NotNil(dout) 348 349 tg, ok = dout.TaskGroups[alloc.TaskGroup] 350 assert.True(ok) 351 assert.NotNil(tg) 352 assert.Equal(2, tg.PlacedAllocs) 353 354 evalOut, err = state.EvalByID(ws, eval.ID) 355 assert.Nil(err) 356 assert.NotNil(evalOut) 357 assert.EqualValues(1001, evalOut.ModifyIndex) 358 } 359 360 // This test checks that: 361 // 1) Preempted allocations in plan results are updated 362 // 2) Evals are inserted for preempted jobs 363 func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { 364 ci.Parallel(t) 365 require := require.New(t) 366 367 state := testStateStore(t) 368 alloc := mock.Alloc() 369 job := alloc.Job 370 alloc.Job = nil 371 372 // Insert job 373 err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, job) 374 require.NoError(err) 375 376 // Create an eval 377 eval := mock.Eval() 378 eval.JobID = job.ID 379 err = state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}) 380 require.NoError(err) 381 382 // Insert alloc that will be preempted in the plan 383 preemptedAlloc := mock.Alloc() 384 err = state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{preemptedAlloc}) 385 require.NoError(err) 386 387 minimalPreemptedAlloc := &structs.Allocation{ 388 ID: preemptedAlloc.ID, 389 PreemptedByAllocation: alloc.ID, 390 ModifyTime: time.Now().Unix(), 391 } 392 393 // Create eval for preempted job 394 eval2 := mock.Eval() 395 eval2.JobID = preemptedAlloc.JobID 396 397 // Create a plan result 398 res := structs.ApplyPlanResultsRequest{ 399 AllocUpdateRequest: structs.AllocUpdateRequest{ 400 Alloc: []*structs.Allocation{alloc}, 
401 Job: job, 402 }, 403 EvalID: eval.ID, 404 NodePreemptions: []*structs.Allocation{minimalPreemptedAlloc}, 405 PreemptionEvals: []*structs.Evaluation{eval2}, 406 } 407 408 err = state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) 409 require.NoError(err) 410 411 ws := memdb.NewWatchSet() 412 413 // Verify alloc and eval created by plan 414 out, err := state.AllocByID(ws, alloc.ID) 415 require.NoError(err) 416 require.Equal(alloc, out) 417 418 index, err := state.Index("allocs") 419 require.NoError(err) 420 require.EqualValues(1000, index) 421 422 evalOut, err := state.EvalByID(ws, eval.ID) 423 require.NoError(err) 424 require.NotNil(evalOut) 425 require.EqualValues(1000, evalOut.ModifyIndex) 426 427 // Verify preempted alloc 428 preempted, err := state.AllocByID(ws, preemptedAlloc.ID) 429 require.NoError(err) 430 require.Equal(preempted.DesiredStatus, structs.AllocDesiredStatusEvict) 431 require.Equal(preempted.DesiredDescription, fmt.Sprintf("Preempted by alloc ID %v", alloc.ID)) 432 require.Equal(preempted.Job.ID, preemptedAlloc.Job.ID) 433 require.Equal(preempted.Job, preemptedAlloc.Job) 434 435 // Verify eval for preempted job 436 preemptedJobEval, err := state.EvalByID(ws, eval2.ID) 437 require.NoError(err) 438 require.NotNil(preemptedJobEval) 439 require.EqualValues(1000, preemptedJobEval.ModifyIndex) 440 441 } 442 443 // This test checks that deployment updates are applied correctly 444 func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { 445 ci.Parallel(t) 446 state := testStateStore(t) 447 448 // Create a job that applies to all 449 job := mock.Job() 450 if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, job); err != nil { 451 t.Fatalf("err: %v", err) 452 } 453 454 // Create a deployment that we will update its status 455 doutstanding := mock.Deployment() 456 doutstanding.JobID = job.ID 457 458 if err := state.UpsertDeployment(1000, doutstanding); err != nil { 459 t.Fatalf("err: %v", err) 460 } 461 462 eval := 
mock.Eval() 463 eval.JobID = job.ID 464 465 // Create an eval 466 if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1, []*structs.Evaluation{eval}); err != nil { 467 t.Fatalf("err: %v", err) 468 } 469 alloc := mock.Alloc() 470 alloc.Job = nil 471 472 dnew := mock.Deployment() 473 dnew.JobID = job.ID 474 alloc.DeploymentID = dnew.ID 475 476 // Update the old deployment 477 update := &structs.DeploymentStatusUpdate{ 478 DeploymentID: doutstanding.ID, 479 Status: "foo", 480 StatusDescription: "bar", 481 } 482 483 // Create a plan result 484 res := structs.ApplyPlanResultsRequest{ 485 AllocUpdateRequest: structs.AllocUpdateRequest{ 486 Alloc: []*structs.Allocation{alloc}, 487 Job: job, 488 }, 489 Deployment: dnew, 490 DeploymentUpdates: []*structs.DeploymentStatusUpdate{update}, 491 EvalID: eval.ID, 492 } 493 494 err := state.UpsertPlanResults(structs.MsgTypeTestSetup, 1000, &res) 495 if err != nil { 496 t.Fatalf("err: %v", err) 497 } 498 assert := assert.New(t) 499 ws := memdb.NewWatchSet() 500 501 // Check the deployments are correctly updated. 
502 dout, err := state.DeploymentByID(ws, dnew.ID) 503 assert.Nil(err) 504 assert.NotNil(dout) 505 506 tg, ok := dout.TaskGroups[alloc.TaskGroup] 507 assert.True(ok) 508 assert.NotNil(tg) 509 assert.Equal(1, tg.PlacedAllocs) 510 511 doutstandingout, err := state.DeploymentByID(ws, doutstanding.ID) 512 assert.Nil(err) 513 assert.NotNil(doutstandingout) 514 assert.Equal(update.Status, doutstandingout.Status) 515 assert.Equal(update.StatusDescription, doutstandingout.StatusDescription) 516 assert.EqualValues(1000, doutstandingout.ModifyIndex) 517 518 evalOut, err := state.EvalByID(ws, eval.ID) 519 assert.Nil(err) 520 assert.NotNil(evalOut) 521 assert.EqualValues(1000, evalOut.ModifyIndex) 522 if watchFired(ws) { 523 t.Fatalf("bad") 524 } 525 } 526 527 func TestStateStore_UpsertDeployment(t *testing.T) { 528 ci.Parallel(t) 529 530 state := testStateStore(t) 531 deployment := mock.Deployment() 532 533 // Create a watchset so we can test that upsert fires the watch 534 ws := memdb.NewWatchSet() 535 _, err := state.DeploymentsByJobID(ws, deployment.Namespace, deployment.ID, true) 536 if err != nil { 537 t.Fatalf("bad: %v", err) 538 } 539 540 err = state.UpsertDeployment(1000, deployment) 541 if err != nil { 542 t.Fatalf("err: %v", err) 543 } 544 if !watchFired(ws) { 545 t.Fatalf("bad") 546 } 547 548 ws = memdb.NewWatchSet() 549 out, err := state.DeploymentByID(ws, deployment.ID) 550 if err != nil { 551 t.Fatalf("err: %v", err) 552 } 553 554 if !reflect.DeepEqual(deployment, out) { 555 t.Fatalf("bad: %#v %#v", deployment, out) 556 } 557 558 index, err := state.Index("deployment") 559 if err != nil { 560 t.Fatalf("err: %v", err) 561 } 562 if index != 1000 { 563 t.Fatalf("bad: %d", index) 564 } 565 566 if watchFired(ws) { 567 t.Fatalf("bad") 568 } 569 } 570 571 // Tests that deployments of older create index and same job id are not returned 572 func TestStateStore_OldDeployment(t *testing.T) { 573 ci.Parallel(t) 574 575 state := testStateStore(t) 576 job := mock.Job() 577 
job.ID = "job1" 578 state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) 579 580 deploy1 := mock.Deployment() 581 deploy1.JobID = job.ID 582 deploy1.JobCreateIndex = job.CreateIndex 583 584 deploy2 := mock.Deployment() 585 deploy2.JobID = job.ID 586 deploy2.JobCreateIndex = 11 587 588 require := require.New(t) 589 590 // Insert both deployments 591 err := state.UpsertDeployment(1001, deploy1) 592 require.Nil(err) 593 594 err = state.UpsertDeployment(1002, deploy2) 595 require.Nil(err) 596 597 ws := memdb.NewWatchSet() 598 // Should return both deployments 599 deploys, err := state.DeploymentsByJobID(ws, deploy1.Namespace, job.ID, true) 600 require.Nil(err) 601 require.Len(deploys, 2) 602 603 // Should only return deploy1 604 deploys, err = state.DeploymentsByJobID(ws, deploy1.Namespace, job.ID, false) 605 require.Nil(err) 606 require.Len(deploys, 1) 607 require.Equal(deploy1.ID, deploys[0].ID) 608 } 609 610 func TestStateStore_DeleteDeployment(t *testing.T) { 611 ci.Parallel(t) 612 613 state := testStateStore(t) 614 d1 := mock.Deployment() 615 d2 := mock.Deployment() 616 617 err := state.UpsertDeployment(1000, d1) 618 if err != nil { 619 t.Fatalf("err: %v", err) 620 } 621 if err := state.UpsertDeployment(1001, d2); err != nil { 622 t.Fatalf("err: %v", err) 623 } 624 625 // Create a watchset so we can test that delete fires the watch 626 ws := memdb.NewWatchSet() 627 if _, err := state.DeploymentByID(ws, d1.ID); err != nil { 628 t.Fatalf("bad: %v", err) 629 } 630 631 err = state.DeleteDeployment(1002, []string{d1.ID, d2.ID}) 632 if err != nil { 633 t.Fatalf("err: %v", err) 634 } 635 636 if !watchFired(ws) { 637 t.Fatalf("bad") 638 } 639 640 ws = memdb.NewWatchSet() 641 out, err := state.DeploymentByID(ws, d1.ID) 642 if err != nil { 643 t.Fatalf("err: %v", err) 644 } 645 646 if out != nil { 647 t.Fatalf("bad: %#v %#v", d1, out) 648 } 649 650 index, err := state.Index("deployment") 651 if err != nil { 652 t.Fatalf("err: %v", err) 653 } 654 if index != 1002 { 655 
t.Fatalf("bad: %d", index) 656 } 657 658 if watchFired(ws) { 659 t.Fatalf("bad") 660 } 661 } 662 663 func TestStateStore_Deployments(t *testing.T) { 664 ci.Parallel(t) 665 666 state := testStateStore(t) 667 var deployments []*structs.Deployment 668 669 for i := 0; i < 10; i++ { 670 deployment := mock.Deployment() 671 deployments = append(deployments, deployment) 672 673 err := state.UpsertDeployment(1000+uint64(i), deployment) 674 require.NoError(t, err) 675 } 676 677 ws := memdb.NewWatchSet() 678 it, err := state.Deployments(ws, SortDefault) 679 require.NoError(t, err) 680 681 var out []*structs.Deployment 682 for { 683 raw := it.Next() 684 if raw == nil { 685 break 686 } 687 out = append(out, raw.(*structs.Deployment)) 688 } 689 690 require.Equal(t, deployments, out) 691 require.False(t, watchFired(ws)) 692 } 693 694 func TestStateStore_Deployments_Namespace(t *testing.T) { 695 ci.Parallel(t) 696 697 state := testStateStore(t) 698 699 ns1 := mock.Namespace() 700 ns1.Name = "namespaced" 701 deploy1 := mock.Deployment() 702 deploy2 := mock.Deployment() 703 deploy1.Namespace = ns1.Name 704 deploy2.Namespace = ns1.Name 705 706 ns2 := mock.Namespace() 707 ns2.Name = "new-namespace" 708 deploy3 := mock.Deployment() 709 deploy4 := mock.Deployment() 710 deploy3.Namespace = ns2.Name 711 deploy4.Namespace = ns2.Name 712 713 require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) 714 715 // Create watchsets so we can test that update fires the watch 716 watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} 717 _, err := state.DeploymentsByNamespace(watches[0], ns1.Name) 718 require.NoError(t, err) 719 _, err = state.DeploymentsByNamespace(watches[1], ns2.Name) 720 require.NoError(t, err) 721 722 require.NoError(t, state.UpsertDeployment(1001, deploy1)) 723 require.NoError(t, state.UpsertDeployment(1002, deploy2)) 724 require.NoError(t, state.UpsertDeployment(1003, deploy3)) 725 require.NoError(t, state.UpsertDeployment(1004, deploy4)) 
726 require.True(t, watchFired(watches[0])) 727 require.True(t, watchFired(watches[1])) 728 729 ws := memdb.NewWatchSet() 730 iter1, err := state.DeploymentsByNamespace(ws, ns1.Name) 731 require.NoError(t, err) 732 iter2, err := state.DeploymentsByNamespace(ws, ns2.Name) 733 require.NoError(t, err) 734 735 var out1 []*structs.Deployment 736 for { 737 raw := iter1.Next() 738 if raw == nil { 739 break 740 } 741 out1 = append(out1, raw.(*structs.Deployment)) 742 } 743 744 var out2 []*structs.Deployment 745 for { 746 raw := iter2.Next() 747 if raw == nil { 748 break 749 } 750 out2 = append(out2, raw.(*structs.Deployment)) 751 } 752 753 require.Len(t, out1, 2) 754 require.Len(t, out2, 2) 755 756 for _, deploy := range out1 { 757 require.Equal(t, ns1.Name, deploy.Namespace) 758 } 759 for _, deploy := range out2 { 760 require.Equal(t, ns2.Name, deploy.Namespace) 761 } 762 763 index, err := state.Index("deployment") 764 require.NoError(t, err) 765 require.EqualValues(t, 1004, index) 766 require.False(t, watchFired(ws)) 767 } 768 769 func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { 770 ci.Parallel(t) 771 772 state := testStateStore(t) 773 deploy := mock.Deployment() 774 775 deploy.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" 776 err := state.UpsertDeployment(1000, deploy) 777 require.NoError(t, err) 778 779 gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { 780 var deploys []*structs.Deployment 781 for { 782 raw := iter.Next() 783 if raw == nil { 784 break 785 } 786 deploy := raw.(*structs.Deployment) 787 deploys = append(deploys, deploy) 788 } 789 return deploys 790 } 791 792 t.Run("first deployment", func(t *testing.T) { 793 // Create a watchset so we can test that getters don't cause it to fire 794 ws := memdb.NewWatchSet() 795 iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, deploy.ID, SortDefault) 796 require.NoError(t, err) 797 798 deploys := gatherDeploys(iter) 799 require.Len(t, deploys, 1) 800 require.False(t, 
watchFired(ws)) 801 }) 802 803 t.Run("using prefix", func(t *testing.T) { 804 ws := memdb.NewWatchSet() 805 iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortDefault) 806 require.NoError(t, err) 807 808 deploys := gatherDeploys(iter) 809 require.Len(t, deploys, 1) 810 require.False(t, watchFired(ws)) 811 }) 812 813 deploy = mock.Deployment() 814 deploy.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" 815 err = state.UpsertDeployment(1001, deploy) 816 require.NoError(t, err) 817 818 t.Run("more than one", func(t *testing.T) { 819 ws := memdb.NewWatchSet() 820 iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortDefault) 821 require.NoError(t, err) 822 823 deploys := gatherDeploys(iter) 824 require.Len(t, deploys, 2) 825 }) 826 827 t.Run("filter to one", func(t *testing.T) { 828 ws := memdb.NewWatchSet() 829 iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "1111", SortDefault) 830 require.NoError(t, err) 831 832 deploys := gatherDeploys(iter) 833 require.Len(t, deploys, 1) 834 require.False(t, watchFired(ws)) 835 }) 836 837 t.Run("reverse order", func(t *testing.T) { 838 ws := memdb.NewWatchSet() 839 iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortReverse) 840 require.NoError(t, err) 841 842 got := []string{} 843 for _, d := range gatherDeploys(iter) { 844 got = append(got, d.ID) 845 } 846 expected := []string{ 847 "11222222-662e-d0ab-d1c9-3e434af7bdb4", 848 "11111111-662e-d0ab-d1c9-3e434af7bdb4", 849 } 850 require.Equal(t, expected, got) 851 require.False(t, watchFired(ws)) 852 }) 853 } 854 855 func TestStateStore_DeploymentsByIDPrefix_Namespaces(t *testing.T) { 856 ci.Parallel(t) 857 858 state := testStateStore(t) 859 deploy1 := mock.Deployment() 860 deploy1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2" 861 deploy2 := mock.Deployment() 862 deploy2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2" 863 sharedPrefix := "aabb" 864 865 ns1 := mock.Namespace() 866 ns1.Name = "namespace1" 867 ns2 := 
mock.Namespace() 868 ns2.Name = "namespace2" 869 deploy1.Namespace = ns1.Name 870 deploy2.Namespace = ns2.Name 871 872 require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) 873 require.NoError(t, state.UpsertDeployment(1000, deploy1)) 874 require.NoError(t, state.UpsertDeployment(1001, deploy2)) 875 876 gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { 877 var deploys []*structs.Deployment 878 for { 879 raw := iter.Next() 880 if raw == nil { 881 break 882 } 883 deploy := raw.(*structs.Deployment) 884 deploys = append(deploys, deploy) 885 } 886 return deploys 887 } 888 889 ws := memdb.NewWatchSet() 890 iter1, err := state.DeploymentsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault) 891 require.NoError(t, err) 892 iter2, err := state.DeploymentsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault) 893 require.NoError(t, err) 894 895 deploysNs1 := gatherDeploys(iter1) 896 deploysNs2 := gatherDeploys(iter2) 897 require.Len(t, deploysNs1, 1) 898 require.Len(t, deploysNs2, 1) 899 900 iter1, err = state.DeploymentsByIDPrefix(ws, ns1.Name, deploy1.ID[:8], SortDefault) 901 require.NoError(t, err) 902 903 deploysNs1 = gatherDeploys(iter1) 904 require.Len(t, deploysNs1, 1) 905 require.False(t, watchFired(ws)) 906 } 907 908 func TestStateStore_UpsertNamespaces(t *testing.T) { 909 ci.Parallel(t) 910 911 state := testStateStore(t) 912 ns1 := mock.Namespace() 913 ns2 := mock.Namespace() 914 915 // Create a watchset so we can test that upsert fires the watch 916 ws := memdb.NewWatchSet() 917 _, err := state.NamespaceByName(ws, ns1.Name) 918 require.NoError(t, err) 919 920 require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns1, ns2})) 921 require.True(t, watchFired(ws)) 922 923 ws = memdb.NewWatchSet() 924 out, err := state.NamespaceByName(ws, ns1.Name) 925 require.NoError(t, err) 926 require.Equal(t, ns1, out) 927 928 out, err = state.NamespaceByName(ws, ns2.Name) 929 require.NoError(t, err) 930 require.Equal(t, 
ns2, out) 931 932 index, err := state.Index(TableNamespaces) 933 require.NoError(t, err) 934 require.EqualValues(t, 1000, index) 935 require.False(t, watchFired(ws)) 936 } 937 938 func TestStateStore_DeleteNamespaces(t *testing.T) { 939 ci.Parallel(t) 940 941 state := testStateStore(t) 942 ns1 := mock.Namespace() 943 ns2 := mock.Namespace() 944 945 require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns1, ns2})) 946 947 // Create a watchset so we can test that delete fires the watch 948 ws := memdb.NewWatchSet() 949 _, err := state.NamespaceByName(ws, ns1.Name) 950 require.NoError(t, err) 951 952 require.NoError(t, state.DeleteNamespaces(1001, []string{ns1.Name, ns2.Name})) 953 require.True(t, watchFired(ws)) 954 955 ws = memdb.NewWatchSet() 956 out, err := state.NamespaceByName(ws, ns1.Name) 957 require.NoError(t, err) 958 require.Nil(t, out) 959 960 out, err = state.NamespaceByName(ws, ns2.Name) 961 require.NoError(t, err) 962 require.Nil(t, out) 963 964 index, err := state.Index(TableNamespaces) 965 require.NoError(t, err) 966 require.EqualValues(t, 1001, index) 967 require.False(t, watchFired(ws)) 968 } 969 970 func TestStateStore_DeleteNamespaces_Default(t *testing.T) { 971 ci.Parallel(t) 972 973 state := testStateStore(t) 974 975 ns := mock.Namespace() 976 ns.Name = structs.DefaultNamespace 977 require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) 978 979 err := state.DeleteNamespaces(1002, []string{ns.Name}) 980 require.Error(t, err) 981 require.Contains(t, err.Error(), "can not be deleted") 982 } 983 984 func TestStateStore_DeleteNamespaces_NonTerminalJobs(t *testing.T) { 985 ci.Parallel(t) 986 987 state := testStateStore(t) 988 989 ns := mock.Namespace() 990 require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) 991 992 job := mock.Job() 993 job.Namespace = ns.Name 994 require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)) 995 996 // Create a watchset so we can test that delete 
fires the watch 997 ws := memdb.NewWatchSet() 998 _, err := state.NamespaceByName(ws, ns.Name) 999 require.NoError(t, err) 1000 1001 err = state.DeleteNamespaces(1002, []string{ns.Name}) 1002 require.Error(t, err) 1003 require.Contains(t, err.Error(), "one non-terminal") 1004 require.False(t, watchFired(ws)) 1005 1006 ws = memdb.NewWatchSet() 1007 out, err := state.NamespaceByName(ws, ns.Name) 1008 require.NoError(t, err) 1009 require.NotNil(t, out) 1010 1011 index, err := state.Index(TableNamespaces) 1012 require.NoError(t, err) 1013 require.EqualValues(t, 1000, index) 1014 require.False(t, watchFired(ws)) 1015 } 1016 1017 func TestStateStore_DeleteNamespaces_CSIVolumes(t *testing.T) { 1018 ci.Parallel(t) 1019 1020 state := testStateStore(t) 1021 1022 ns := mock.Namespace() 1023 require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) 1024 1025 plugin := mock.CSIPlugin() 1026 vol := mock.CSIVolume(plugin) 1027 vol.Namespace = ns.Name 1028 1029 require.NoError(t, state.UpsertCSIVolume(1001, []*structs.CSIVolume{vol})) 1030 1031 // Create a watchset so we can test that delete fires the watch 1032 ws := memdb.NewWatchSet() 1033 _, err := state.NamespaceByName(ws, ns.Name) 1034 require.NoError(t, err) 1035 1036 err = state.DeleteNamespaces(1002, []string{ns.Name}) 1037 require.Error(t, err) 1038 require.Contains(t, err.Error(), "one CSI volume") 1039 require.False(t, watchFired(ws)) 1040 1041 ws = memdb.NewWatchSet() 1042 out, err := state.NamespaceByName(ws, ns.Name) 1043 require.NoError(t, err) 1044 require.NotNil(t, out) 1045 1046 index, err := state.Index(TableNamespaces) 1047 require.NoError(t, err) 1048 require.EqualValues(t, 1000, index) 1049 require.False(t, watchFired(ws)) 1050 } 1051 1052 func TestStateStore_DeleteNamespaces_Variables(t *testing.T) { 1053 ci.Parallel(t) 1054 1055 state := testStateStore(t) 1056 1057 ns := mock.Namespace() 1058 require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) 1059 1060 sv := 
mock.VariableEncrypted() 1061 sv.Namespace = ns.Name 1062 1063 resp := state.VarSet(1001, &structs.VarApplyStateRequest{ 1064 Op: structs.VarOpSet, 1065 Var: sv, 1066 }) 1067 require.NoError(t, resp.Error) 1068 1069 // Create a watchset so we can test that delete fires the watch 1070 ws := memdb.NewWatchSet() 1071 _, err := state.NamespaceByName(ws, ns.Name) 1072 require.NoError(t, err) 1073 1074 err = state.DeleteNamespaces(1002, []string{ns.Name}) 1075 require.Error(t, err) 1076 require.Contains(t, err.Error(), "one variable") 1077 require.False(t, watchFired(ws)) 1078 1079 ws = memdb.NewWatchSet() 1080 out, err := state.NamespaceByName(ws, ns.Name) 1081 require.NoError(t, err) 1082 require.NotNil(t, out) 1083 1084 index, err := state.Index(TableNamespaces) 1085 require.NoError(t, err) 1086 require.EqualValues(t, 1000, index) 1087 require.False(t, watchFired(ws)) 1088 } 1089 1090 func TestStateStore_Namespaces(t *testing.T) { 1091 ci.Parallel(t) 1092 1093 state := testStateStore(t) 1094 var namespaces []*structs.Namespace 1095 1096 for i := 0; i < 10; i++ { 1097 ns := mock.Namespace() 1098 namespaces = append(namespaces, ns) 1099 } 1100 1101 require.NoError(t, state.UpsertNamespaces(1000, namespaces)) 1102 1103 // Create a watchset so we can test that getters don't cause it to fire 1104 ws := memdb.NewWatchSet() 1105 iter, err := state.Namespaces(ws) 1106 require.NoError(t, err) 1107 1108 var out []*structs.Namespace 1109 for { 1110 raw := iter.Next() 1111 if raw == nil { 1112 break 1113 } 1114 ns := raw.(*structs.Namespace) 1115 if ns.Name == structs.DefaultNamespace { 1116 continue 1117 } 1118 out = append(out, ns) 1119 } 1120 1121 namespaceSort(namespaces) 1122 namespaceSort(out) 1123 require.Equal(t, namespaces, out) 1124 require.False(t, watchFired(ws)) 1125 } 1126 1127 func TestStateStore_NamespaceNames(t *testing.T) { 1128 ci.Parallel(t) 1129 1130 state := testStateStore(t) 1131 var namespaces []*structs.Namespace 1132 expectedNames := 
[]string{structs.DefaultNamespace} 1133 1134 for i := 0; i < 10; i++ { 1135 ns := mock.Namespace() 1136 namespaces = append(namespaces, ns) 1137 expectedNames = append(expectedNames, ns.Name) 1138 } 1139 1140 err := state.UpsertNamespaces(1000, namespaces) 1141 require.NoError(t, err) 1142 1143 found, err := state.NamespaceNames() 1144 require.NoError(t, err) 1145 1146 sort.Strings(expectedNames) 1147 sort.Strings(found) 1148 1149 require.Equal(t, expectedNames, found) 1150 } 1151 1152 func TestStateStore_NamespaceByNamePrefix(t *testing.T) { 1153 ci.Parallel(t) 1154 1155 state := testStateStore(t) 1156 ns := mock.Namespace() 1157 1158 ns.Name = "foobar" 1159 require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) 1160 1161 // Create a watchset so we can test that getters don't cause it to fire 1162 ws := memdb.NewWatchSet() 1163 iter, err := state.NamespacesByNamePrefix(ws, ns.Name) 1164 require.NoError(t, err) 1165 1166 gatherNamespaces := func(iter memdb.ResultIterator) []*structs.Namespace { 1167 var namespaces []*structs.Namespace 1168 for { 1169 raw := iter.Next() 1170 if raw == nil { 1171 break 1172 } 1173 ns := raw.(*structs.Namespace) 1174 namespaces = append(namespaces, ns) 1175 } 1176 return namespaces 1177 } 1178 1179 namespaces := gatherNamespaces(iter) 1180 require.Len(t, namespaces, 1) 1181 require.False(t, watchFired(ws)) 1182 1183 iter, err = state.NamespacesByNamePrefix(ws, "foo") 1184 require.NoError(t, err) 1185 1186 namespaces = gatherNamespaces(iter) 1187 require.Len(t, namespaces, 1) 1188 1189 ns = mock.Namespace() 1190 ns.Name = "foozip" 1191 err = state.UpsertNamespaces(1001, []*structs.Namespace{ns}) 1192 require.NoError(t, err) 1193 require.True(t, watchFired(ws)) 1194 1195 ws = memdb.NewWatchSet() 1196 iter, err = state.NamespacesByNamePrefix(ws, "foo") 1197 require.NoError(t, err) 1198 1199 namespaces = gatherNamespaces(iter) 1200 require.Len(t, namespaces, 2) 1201 1202 iter, err = state.NamespacesByNamePrefix(ws, 
"foob")
	require.NoError(t, err)

	// The longer prefix matches only "foobar".
	namespaces = gatherNamespaces(iter)
	require.Len(t, namespaces, 1)
	require.False(t, watchFired(ws))
}

// TestStateStore_RestoreNamespace verifies a namespace round-trips through
// the snapshot restore path.
func TestStateStore_RestoreNamespace(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	ns := mock.Namespace()

	restore, err := state.Restore()
	require.NoError(t, err)

	require.NoError(t, restore.NamespaceRestore(ns))
	restore.Commit()

	ws := memdb.NewWatchSet()
	out, err := state.NamespaceByName(ws, ns.Name)
	require.NoError(t, err)
	require.Equal(t, out, ns)
}

// namespaceSort is used to sort namespaces by name
func namespaceSort(namespaces []*structs.Namespace) {
	sort.Slice(namespaces, func(i, j int) bool {
		return namespaces[i].Name < namespaces[j].Name
	})
}

// TestStateStore_UpsertNode_Node covers node registration, secret-ID lookup,
// and the re-register event emitted after a down/up transition.
func TestStateStore_UpsertNode_Node(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)
	state := testStateStore(t)
	node := mock.Node()

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	_, err := state.NodeByID(ws, node.ID)
	require.NoError(err)

	require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 1000, node))
	require.True(watchFired(ws))

	ws = memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.NoError(err)

	out2, err := state.NodeBySecretID(ws, node.SecretID)
	require.NoError(err)
	require.EqualValues(node, out)
	require.EqualValues(node, out2)
	require.Len(out.Events, 1)
	require.Equal(NodeRegisterEventRegistered, out.Events[0].Message)

	index, err := state.Index("nodes")
	require.NoError(err)
	require.EqualValues(1000, index)
	require.False(watchFired(ws))

	// Transition the node to down and then up and ensure we get a re-register
	// event
	down := out.Copy()
	down.Status =
structs.NodeStatusDown
	require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 1001, down))
	require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 1002, out))

	out, err = state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.Len(out.Events, 2)
	require.Equal(NodeRegisterEventReregistered, out.Events[1].Message)
}

// TestStateStore_UpsertNode_NodePool is a table-driven test of node pool
// handling when nodes are registered or updated.
func TestStateStore_UpsertNode_NodePool(t *testing.T) {
	ci.Parallel(t)

	devPoolName := "dev"
	nodeWithPoolID := uuid.Generate()
	nodeWithoutPoolID := uuid.Generate()

	testCases := []struct {
		name               string
		nodeID             string
		pool               string
		createPool         bool
		expectedPool       string
		expectedPoolExists bool
		validateFn         func(*testing.T, *structs.Node, *structs.NodePool)
	}{
		{
			name:               "register new node in new node pool",
			nodeID:             "",
			pool:               "new",
			createPool:         true,
			expectedPool:       "new",
			expectedPoolExists: true,
			validateFn: func(t *testing.T, node *structs.Node, pool *structs.NodePool) {
				// Verify node pool was created in the same transaction as the
				// node registration.
				must.Eq(t, pool.CreateIndex, node.ModifyIndex)
			},
		},
		{
			name:               "register new node in existing node pool",
			nodeID:             "",
			pool:               devPoolName,
			expectedPool:       devPoolName,
			expectedPoolExists: true,
			validateFn: func(t *testing.T, node *structs.Node, pool *structs.NodePool) {
				// Verify node pool was not modified.
				must.NotEq(t, pool.CreateIndex, node.ModifyIndex)
			},
		},
		{
			name:               "register new node in built-in node pool",
			nodeID:             "",
			pool:               structs.NodePoolDefault,
			expectedPool:       structs.NodePoolDefault,
			expectedPoolExists: true,
			validateFn: func(t *testing.T, node *structs.Node, pool *structs.NodePool) {
				// Verify node pool was not modified.
1327 must.Eq(t, 1, pool.ModifyIndex) 1328 }, 1329 }, 1330 { 1331 name: "move existing node to new node pool", 1332 nodeID: nodeWithPoolID, 1333 pool: "new", 1334 createPool: true, 1335 expectedPool: "new", 1336 expectedPoolExists: true, 1337 validateFn: func(t *testing.T, node *structs.Node, pool *structs.NodePool) { 1338 // Verify node pool was created in the same transaction as the 1339 // node was updated. 1340 must.Eq(t, pool.CreateIndex, node.ModifyIndex) 1341 }, 1342 }, 1343 { 1344 name: "move existing node to existing node pool", 1345 nodeID: nodeWithPoolID, 1346 pool: devPoolName, 1347 expectedPool: devPoolName, 1348 expectedPoolExists: true, 1349 }, 1350 { 1351 name: "move existing node to built-in node pool", 1352 nodeID: nodeWithPoolID, 1353 pool: structs.NodePoolDefault, 1354 expectedPool: structs.NodePoolDefault, 1355 expectedPoolExists: true, 1356 }, 1357 { 1358 name: "update node without pool to new node pool", 1359 nodeID: nodeWithoutPoolID, 1360 pool: "new", 1361 createPool: true, 1362 expectedPool: "new", 1363 expectedPoolExists: true, 1364 }, 1365 { 1366 name: "update node without pool to existing node pool", 1367 nodeID: nodeWithoutPoolID, 1368 pool: devPoolName, 1369 expectedPool: devPoolName, 1370 expectedPoolExists: true, 1371 }, 1372 { 1373 name: "update node without pool with empty string to default", 1374 nodeID: nodeWithoutPoolID, 1375 pool: "", 1376 expectedPool: structs.NodePoolDefault, 1377 expectedPoolExists: true, 1378 }, 1379 { 1380 name: "register new node in new node pool without creating it", 1381 nodeID: "", 1382 pool: "new", 1383 createPool: false, 1384 expectedPool: "new", 1385 expectedPoolExists: false, 1386 }, 1387 } 1388 1389 for _, tc := range testCases { 1390 t.Run(tc.name, func(t *testing.T) { 1391 state := testStateStore(t) 1392 1393 // Populate state with pre-existing node pool. 
1394 devPool := mock.NodePool() 1395 devPool.Name = devPoolName 1396 err := state.UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{devPool}) 1397 must.NoError(t, err) 1398 1399 // Populate state with pre-existing node assigned to the 1400 // pre-existing node pool. 1401 nodeWithPool := mock.Node() 1402 nodeWithPool.ID = nodeWithPoolID 1403 nodeWithPool.NodePool = devPool.Name 1404 err = state.UpsertNode(structs.MsgTypeTestSetup, 1001, nodeWithPool) 1405 must.NoError(t, err) 1406 1407 // Populate state with pre-existing node with nil node pool to 1408 // simulate an upgrade path. 1409 nodeWithoutPool := mock.Node() 1410 nodeWithoutPool.ID = nodeWithoutPoolID 1411 err = state.UpsertNode(structs.MsgTypeTestSetup, 1002, nodeWithoutPool) 1412 must.NoError(t, err) 1413 1414 // Upsert test node. 1415 var node *structs.Node 1416 switch tc.nodeID { 1417 case nodeWithPoolID: 1418 node = nodeWithPool.Copy() 1419 case nodeWithoutPoolID: 1420 node = nodeWithoutPool.Copy() 1421 default: 1422 node = mock.Node() 1423 } 1424 1425 node.NodePool = tc.pool 1426 opts := []NodeUpsertOption{} 1427 if tc.createPool { 1428 opts = append(opts, NodeUpsertWithNodePool) 1429 } 1430 err = state.UpsertNode(structs.MsgTypeTestSetup, 1003, node, opts...) 1431 must.NoError(t, err) 1432 1433 // Verify that node is part of the expected pool. 1434 got, err := state.NodeByID(nil, node.ID) 1435 must.NoError(t, err) 1436 must.NotNil(t, got) 1437 1438 // Verify node pool exists if requests. 1439 pool, err := state.NodePoolByName(nil, tc.expectedPool) 1440 must.NoError(t, err) 1441 if tc.expectedPoolExists { 1442 must.NotNil(t, pool) 1443 } else { 1444 must.Nil(t, pool) 1445 } 1446 1447 // Verify node was assigned to node pool. 
1448 must.Eq(t, tc.expectedPool, got.NodePool) 1449 1450 if tc.validateFn != nil { 1451 tc.validateFn(t, got, pool) 1452 } 1453 }) 1454 } 1455 } 1456 1457 func TestStateStore_DeleteNode_Node(t *testing.T) { 1458 ci.Parallel(t) 1459 1460 state := testStateStore(t) 1461 1462 // Create and insert two nodes, which we'll delete 1463 node0 := mock.Node() 1464 node1 := mock.Node() 1465 err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node0) 1466 require.NoError(t, err) 1467 err = state.UpsertNode(structs.MsgTypeTestSetup, 1001, node1) 1468 require.NoError(t, err) 1469 1470 // Create a watchset so we can test that delete fires the watch 1471 ws := memdb.NewWatchSet() 1472 1473 // Check that both nodes are not nil 1474 out, err := state.NodeByID(ws, node0.ID) 1475 require.NoError(t, err) 1476 require.NotNil(t, out) 1477 out, err = state.NodeByID(ws, node1.ID) 1478 require.NoError(t, err) 1479 require.NotNil(t, out) 1480 1481 // Delete both nodes in a batch, fires the watch 1482 err = state.DeleteNode(structs.MsgTypeTestSetup, 1002, []string{node0.ID, node1.ID}) 1483 require.NoError(t, err) 1484 require.True(t, watchFired(ws)) 1485 1486 // Check that both nodes are nil 1487 ws = memdb.NewWatchSet() 1488 out, err = state.NodeByID(ws, node0.ID) 1489 require.NoError(t, err) 1490 require.Nil(t, out) 1491 out, err = state.NodeByID(ws, node1.ID) 1492 require.NoError(t, err) 1493 require.Nil(t, out) 1494 1495 // Ensure that the index is still at 1002, from DeleteNode 1496 index, err := state.Index("nodes") 1497 require.NoError(t, err) 1498 require.Equal(t, uint64(1002), index) 1499 require.False(t, watchFired(ws)) 1500 } 1501 1502 func TestStateStore_UpdateNodeStatus_Node(t *testing.T) { 1503 ci.Parallel(t) 1504 require := require.New(t) 1505 1506 state := testStateStore(t) 1507 node := mock.Node() 1508 1509 require.NoError(state.UpsertNode(structs.MsgTypeTestSetup, 800, node)) 1510 1511 // Create a watchset so we can test that update node status fires the watch 1512 ws := 
memdb.NewWatchSet()
	_, err := state.NodeByID(ws, node.ID)
	require.NoError(err)

	event := &structs.NodeEvent{
		Message:   "Node ready foo",
		Subsystem: structs.NodeEventSubsystemCluster,
		Timestamp: time.Now(),
	}

	require.NoError(state.UpdateNodeStatus(structs.MsgTypeTestSetup, 801, node.ID, structs.NodeStatusReady, 70, event))
	require.True(watchFired(ws))

	ws = memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.Equal(structs.NodeStatusReady, out.Status)
	require.EqualValues(801, out.ModifyIndex)
	require.EqualValues(70, out.StatusUpdatedAt)
	require.Len(out.Events, 2)
	require.Equal(event.Message, out.Events[1].Message)

	index, err := state.Index("nodes")
	require.NoError(err)
	require.EqualValues(801, index)
	require.False(watchFired(ws))
}

// NOTE(review): "TestStatStore" appears to be a typo of "TestStateStore" but
// is kept to preserve the test's registered name.
func TestStatStore_UpdateNodeStatus_LastMissedHeartbeatIndex(t *testing.T) {
	ci.Parallel(t)

	testCases := []struct {
		name            string
		transitions     []string
		expectedIndexes []uint64
	}{
		{
			name: "disconnect",
			transitions: []string{
				structs.NodeStatusReady,
				structs.NodeStatusDisconnected,
			},
			expectedIndexes: []uint64{0, 1001},
		},
		{
			name: "reconnect",
			transitions: []string{
				structs.NodeStatusReady,
				structs.NodeStatusDisconnected,
				structs.NodeStatusInit,
				structs.NodeStatusReady,
			},
			expectedIndexes: []uint64{0, 1001, 1001, 0},
		},
		{
			name: "down",
			transitions: []string{
				structs.NodeStatusReady,
				structs.NodeStatusDown,
			},
			expectedIndexes: []uint64{0, 1001},
		},
		{
			name: "multiple reconnects",
			transitions: []string{
				structs.NodeStatusReady,
				structs.NodeStatusDisconnected,
				structs.NodeStatusInit,
				structs.NodeStatusReady,
				structs.NodeStatusDown,
				structs.NodeStatusReady,
structs.NodeStatusDisconnected, 1584 structs.NodeStatusInit, 1585 structs.NodeStatusReady, 1586 }, 1587 expectedIndexes: []uint64{0, 1001, 1001, 0, 1004, 0, 1006, 1006, 0}, 1588 }, 1589 { 1590 name: "multiple heartbeats", 1591 transitions: []string{ 1592 structs.NodeStatusReady, 1593 structs.NodeStatusDisconnected, 1594 structs.NodeStatusInit, 1595 structs.NodeStatusReady, 1596 structs.NodeStatusReady, 1597 structs.NodeStatusReady, 1598 }, 1599 expectedIndexes: []uint64{0, 1001, 1001, 0, 0, 0}, 1600 }, 1601 { 1602 name: "delayed alloc update", 1603 transitions: []string{ 1604 structs.NodeStatusReady, 1605 structs.NodeStatusDisconnected, 1606 structs.NodeStatusInit, 1607 structs.NodeStatusInit, 1608 structs.NodeStatusInit, 1609 structs.NodeStatusReady, 1610 }, 1611 expectedIndexes: []uint64{0, 1001, 1001, 1001, 1001, 0}, 1612 }, 1613 } 1614 1615 for _, tc := range testCases { 1616 t.Run(tc.name, func(t *testing.T) { 1617 state := testStateStore(t) 1618 node := mock.Node() 1619 must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 999, node)) 1620 1621 for i, status := range tc.transitions { 1622 now := time.Now().UnixNano() 1623 err := state.UpdateNodeStatus(structs.MsgTypeTestSetup, uint64(1000+i), node.ID, status, now, nil) 1624 must.NoError(t, err) 1625 1626 ws := memdb.NewWatchSet() 1627 out, err := state.NodeByID(ws, node.ID) 1628 must.NoError(t, err) 1629 must.Eq(t, tc.expectedIndexes[i], out.LastMissedHeartbeatIndex) 1630 must.Eq(t, status, out.Status) 1631 } 1632 }) 1633 } 1634 } 1635 1636 func TestStateStore_BatchUpdateNodeDrain(t *testing.T) { 1637 ci.Parallel(t) 1638 require := require.New(t) 1639 1640 state := testStateStore(t) 1641 1642 n1, n2 := mock.Node(), mock.Node() 1643 require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1000, n1)) 1644 require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1001, n2)) 1645 1646 // Create a watchset so we can test that update node drain fires the watch 1647 ws := memdb.NewWatchSet() 1648 _, err := 
state.NodeByID(ws, n1.ID)
	require.Nil(err)

	expectedDrain := &structs.DrainStrategy{
		DrainSpec: structs.DrainSpec{
			Deadline: -1 * time.Second,
		},
	}

	update := map[string]*structs.DrainUpdate{
		n1.ID: {
			DrainStrategy: expectedDrain,
		},
		n2.ID: {
			DrainStrategy: expectedDrain,
		},
	}

	event := &structs.NodeEvent{
		Message:   "Drain strategy enabled",
		Subsystem: structs.NodeEventSubsystemDrain,
		Timestamp: time.Now(),
	}
	events := map[string]*structs.NodeEvent{
		n1.ID: event,
		n2.ID: event,
	}

	require.Nil(state.BatchUpdateNodeDrain(structs.MsgTypeTestSetup, 1002, 7, update, events))
	require.True(watchFired(ws))

	// Both nodes must carry the drain strategy, last-drain metadata, the
	// extra event, and the batch's index.
	ws = memdb.NewWatchSet()
	for _, id := range []string{n1.ID, n2.ID} {
		out, err := state.NodeByID(ws, id)
		require.Nil(err)
		require.NotNil(out.DrainStrategy)
		require.Equal(out.DrainStrategy, expectedDrain)
		require.NotNil(out.LastDrain)
		require.Equal(structs.DrainStatusDraining, out.LastDrain.Status)
		require.Len(out.Events, 2)
		require.EqualValues(1002, out.ModifyIndex)
		require.EqualValues(7, out.StatusUpdatedAt)
	}

	index, err := state.Index("nodes")
	require.Nil(err)
	require.EqualValues(1002, index)
	require.False(watchFired(ws))
}

// TestStateStore_UpdateNodeDrain_Node verifies a single-node drain update.
func TestStateStore_UpdateNodeDrain_Node(t *testing.T) {
	ci.Parallel(t)
	require := require.New(t)

	state := testStateStore(t)
	node := mock.Node()

	require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1000, node))

	// Create a watchset so we can test that update node drain fires the watch
	ws := memdb.NewWatchSet()
	_, err := state.NodeByID(ws, node.ID)
	require.Nil(err)

	expectedDrain := &structs.DrainStrategy{
		DrainSpec: structs.DrainSpec{
			Deadline: -1 * time.Second,
		},
	}

	event := &structs.NodeEvent{
		Message:
"Drain strategy enabled",
		Subsystem: structs.NodeEventSubsystemDrain,
		Timestamp: time.Now(),
	}
	require.Nil(state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1001, node.ID, expectedDrain, false, 7, event, nil, ""))
	require.True(watchFired(ws))

	ws = memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.Nil(err)
	require.NotNil(out.DrainStrategy)
	require.NotNil(out.LastDrain)
	require.Equal(structs.DrainStatusDraining, out.LastDrain.Status)
	require.Equal(out.DrainStrategy, expectedDrain)
	require.Len(out.Events, 2)
	require.EqualValues(1001, out.ModifyIndex)
	require.EqualValues(7, out.StatusUpdatedAt)

	index, err := state.Index("nodes")
	require.Nil(err)
	require.EqualValues(1001, index)
	require.False(watchFired(ws))
}

// TestStateStore_AddSingleNodeEvent verifies that UpsertNodeEvents appends a
// single event to a node and fires the node watch.
func TestStateStore_AddSingleNodeEvent(t *testing.T) {
	ci.Parallel(t)
	require := require.New(t)

	state := testStateStore(t)

	node := mock.Node()

	// We create a new node event every time we register a node
	err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
	require.Nil(err)

	require.Equal(1, len(node.Events))
	require.Equal(structs.NodeEventSubsystemCluster, node.Events[0].Subsystem)
	require.Equal(NodeRegisterEventRegistered, node.Events[0].Message)

	// Create a watchset so we can test that AddNodeEvent fires the watch
	ws := memdb.NewWatchSet()
	_, err = state.NodeByID(ws, node.ID)
	require.Nil(err)

	nodeEvent := &structs.NodeEvent{
		Message:   "failed",
		Subsystem: "Driver",
		Timestamp: time.Now(),
	}
	nodeEvents := map[string][]*structs.NodeEvent{
		node.ID: {nodeEvent},
	}
	err = state.UpsertNodeEvents(structs.MsgTypeTestSetup, uint64(1001), nodeEvents)
	require.Nil(err)

	require.True(watchFired(ws))

	ws = memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
1779 require.Nil(err) 1780 1781 require.Equal(2, len(out.Events)) 1782 require.Equal(nodeEvent, out.Events[1]) 1783 } 1784 1785 // To prevent stale node events from accumulating, we limit the number of 1786 // stored node events to 10. 1787 func TestStateStore_NodeEvents_RetentionWindow(t *testing.T) { 1788 ci.Parallel(t) 1789 require := require.New(t) 1790 1791 state := testStateStore(t) 1792 1793 node := mock.Node() 1794 1795 err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) 1796 if err != nil { 1797 t.Fatalf("err: %v", err) 1798 } 1799 require.Equal(1, len(node.Events)) 1800 require.Equal(structs.NodeEventSubsystemCluster, node.Events[0].Subsystem) 1801 require.Equal(NodeRegisterEventRegistered, node.Events[0].Message) 1802 1803 var out *structs.Node 1804 for i := 1; i <= 20; i++ { 1805 ws := memdb.NewWatchSet() 1806 out, err = state.NodeByID(ws, node.ID) 1807 require.Nil(err) 1808 1809 nodeEvent := &structs.NodeEvent{ 1810 Message: fmt.Sprintf("%dith failed", i), 1811 Subsystem: "Driver", 1812 Timestamp: time.Now(), 1813 } 1814 1815 nodeEvents := map[string][]*structs.NodeEvent{ 1816 out.ID: {nodeEvent}, 1817 } 1818 err := state.UpsertNodeEvents(structs.MsgTypeTestSetup, uint64(i), nodeEvents) 1819 require.Nil(err) 1820 1821 require.True(watchFired(ws)) 1822 ws = memdb.NewWatchSet() 1823 out, err = state.NodeByID(ws, node.ID) 1824 require.Nil(err) 1825 } 1826 1827 ws := memdb.NewWatchSet() 1828 out, err = state.NodeByID(ws, node.ID) 1829 require.Nil(err) 1830 1831 require.Equal(10, len(out.Events)) 1832 require.Equal(uint64(11), out.Events[0].CreateIndex) 1833 require.Equal(uint64(20), out.Events[len(out.Events)-1].CreateIndex) 1834 } 1835 1836 func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { 1837 ci.Parallel(t) 1838 require := require.New(t) 1839 1840 state := testStateStore(t) 1841 node := mock.Node() 1842 require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) 1843 1844 // Create a watchset so we can test that 
update node drain fires the watch 1845 ws := memdb.NewWatchSet() 1846 _, err := state.NodeByID(ws, node.ID) 1847 require.Nil(err) 1848 1849 drain := &structs.DrainStrategy{ 1850 DrainSpec: structs.DrainSpec{ 1851 Deadline: -1 * time.Second, 1852 }, 1853 } 1854 1855 event1 := &structs.NodeEvent{ 1856 Message: "Drain strategy enabled", 1857 Subsystem: structs.NodeEventSubsystemDrain, 1858 Timestamp: time.Now(), 1859 } 1860 require.Nil(state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1001, node.ID, drain, false, 7, event1, nil, "")) 1861 require.True(watchFired(ws)) 1862 1863 // Remove the drain 1864 event2 := &structs.NodeEvent{ 1865 Message: "Drain strategy disabled", 1866 Subsystem: structs.NodeEventSubsystemDrain, 1867 Timestamp: time.Now(), 1868 } 1869 require.Nil(state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1002, node.ID, nil, true, 9, event2, nil, "")) 1870 1871 ws = memdb.NewWatchSet() 1872 out, err := state.NodeByID(ws, node.ID) 1873 require.Nil(err) 1874 require.Nil(out.DrainStrategy) 1875 require.Equal(out.SchedulingEligibility, structs.NodeSchedulingEligible) 1876 require.NotNil(out.LastDrain) 1877 require.Equal(structs.DrainStatusCanceled, out.LastDrain.Status) 1878 require.Equal(time.Unix(7, 0), out.LastDrain.StartedAt) 1879 require.Equal(time.Unix(9, 0), out.LastDrain.UpdatedAt) 1880 require.Len(out.Events, 3) 1881 require.EqualValues(1002, out.ModifyIndex) 1882 require.EqualValues(9, out.StatusUpdatedAt) 1883 1884 index, err := state.Index("nodes") 1885 require.Nil(err) 1886 require.EqualValues(1002, index) 1887 require.False(watchFired(ws)) 1888 } 1889 1890 func TestStateStore_UpdateNodeEligibility(t *testing.T) { 1891 ci.Parallel(t) 1892 require := require.New(t) 1893 1894 state := testStateStore(t) 1895 node := mock.Node() 1896 1897 err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) 1898 if err != nil { 1899 t.Fatalf("err: %v", err) 1900 } 1901 1902 expectedEligibility := structs.NodeSchedulingIneligible 1903 1904 // Create a watchset 
so we can test that update node drain fires the watch 1905 ws := memdb.NewWatchSet() 1906 if _, err := state.NodeByID(ws, node.ID); err != nil { 1907 t.Fatalf("bad: %v", err) 1908 } 1909 1910 event := &structs.NodeEvent{ 1911 Message: "Node marked as ineligible", 1912 Subsystem: structs.NodeEventSubsystemCluster, 1913 Timestamp: time.Now(), 1914 } 1915 require.Nil(state.UpdateNodeEligibility(structs.MsgTypeTestSetup, 1001, node.ID, expectedEligibility, 7, event)) 1916 require.True(watchFired(ws)) 1917 1918 ws = memdb.NewWatchSet() 1919 out, err := state.NodeByID(ws, node.ID) 1920 require.Nil(err) 1921 require.Equal(out.SchedulingEligibility, expectedEligibility) 1922 require.Len(out.Events, 2) 1923 require.Equal(out.Events[1], event) 1924 require.EqualValues(1001, out.ModifyIndex) 1925 require.EqualValues(7, out.StatusUpdatedAt) 1926 1927 index, err := state.Index("nodes") 1928 require.Nil(err) 1929 require.EqualValues(1001, index) 1930 require.False(watchFired(ws)) 1931 1932 // Set a drain strategy 1933 expectedDrain := &structs.DrainStrategy{ 1934 DrainSpec: structs.DrainSpec{ 1935 Deadline: -1 * time.Second, 1936 }, 1937 } 1938 require.Nil(state.UpdateNodeDrain(structs.MsgTypeTestSetup, 1002, node.ID, expectedDrain, false, 7, nil, nil, "")) 1939 1940 // Try to set the node to eligible 1941 err = state.UpdateNodeEligibility(structs.MsgTypeTestSetup, 1003, node.ID, structs.NodeSchedulingEligible, 9, nil) 1942 require.NotNil(err) 1943 require.Contains(err.Error(), "while it is draining") 1944 } 1945 1946 func TestStateStore_Nodes(t *testing.T) { 1947 ci.Parallel(t) 1948 1949 state := testStateStore(t) 1950 var nodes []*structs.Node 1951 1952 for i := 0; i < 10; i++ { 1953 node := mock.Node() 1954 nodes = append(nodes, node) 1955 1956 err := state.UpsertNode(structs.MsgTypeTestSetup, 1000+uint64(i), node) 1957 if err != nil { 1958 t.Fatalf("err: %v", err) 1959 } 1960 } 1961 1962 // Create a watchset so we can test that getters don't cause it to fire 1963 ws := 
memdb.NewWatchSet() 1964 iter, err := state.Nodes(ws) 1965 if err != nil { 1966 t.Fatalf("bad: %v", err) 1967 } 1968 1969 var out []*structs.Node 1970 for { 1971 raw := iter.Next() 1972 if raw == nil { 1973 break 1974 } 1975 out = append(out, raw.(*structs.Node)) 1976 } 1977 1978 sort.Sort(NodeIDSort(nodes)) 1979 sort.Sort(NodeIDSort(out)) 1980 1981 if !reflect.DeepEqual(nodes, out) { 1982 t.Fatalf("bad: %#v %#v", nodes, out) 1983 } 1984 1985 if watchFired(ws) { 1986 t.Fatalf("bad") 1987 } 1988 } 1989 1990 func TestStateStore_NodesByIDPrefix(t *testing.T) { 1991 ci.Parallel(t) 1992 1993 state := testStateStore(t) 1994 node := mock.Node() 1995 1996 node.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" 1997 err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) 1998 if err != nil { 1999 t.Fatalf("err: %v", err) 2000 } 2001 2002 // Create a watchset so we can test that getters don't cause it to fire 2003 ws := memdb.NewWatchSet() 2004 iter, err := state.NodesByIDPrefix(ws, node.ID) 2005 if err != nil { 2006 t.Fatalf("err: %v", err) 2007 } 2008 2009 gatherNodes := func(iter memdb.ResultIterator) []*structs.Node { 2010 var nodes []*structs.Node 2011 for { 2012 raw := iter.Next() 2013 if raw == nil { 2014 break 2015 } 2016 node := raw.(*structs.Node) 2017 nodes = append(nodes, node) 2018 } 2019 return nodes 2020 } 2021 2022 nodes := gatherNodes(iter) 2023 if len(nodes) != 1 { 2024 t.Fatalf("err: %v", err) 2025 } 2026 2027 if watchFired(ws) { 2028 t.Fatalf("bad") 2029 } 2030 2031 iter, err = state.NodesByIDPrefix(ws, "11") 2032 if err != nil { 2033 t.Fatalf("err: %v", err) 2034 } 2035 2036 nodes = gatherNodes(iter) 2037 if len(nodes) != 1 { 2038 t.Fatalf("err: %v", err) 2039 } 2040 2041 node = mock.Node() 2042 node.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" 2043 err = state.UpsertNode(structs.MsgTypeTestSetup, 1001, node) 2044 if err != nil { 2045 t.Fatalf("err: %v", err) 2046 } 2047 2048 if !watchFired(ws) { 2049 t.Fatalf("bad") 2050 } 2051 2052 ws = 
memdb.NewWatchSet()
	iter, err = state.NodesByIDPrefix(ws, "11")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	nodes = gatherNodes(iter)
	if len(nodes) != 2 {
		// Fix: the original printed the (always nil) err here; report the
		// actual unexpected count instead.
		t.Fatalf("expected 2 nodes, got %d", len(nodes))
	}

	iter, err = state.NodesByIDPrefix(ws, "1111")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	nodes = gatherNodes(iter)
	if len(nodes) != 1 {
		t.Fatalf("expected 1 node, got %d", len(nodes))
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_NodesByNodePool verifies listing nodes filtered by node
// pool name.
func TestStateStore_NodesByNodePool(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	pool := mock.NodePool()
	err := state.UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{pool})
	must.NoError(t, err)

	node1 := mock.Node()
	node1.NodePool = structs.NodePoolDefault
	err = state.UpsertNode(structs.MsgTypeTestSetup, 1001, node1)
	must.NoError(t, err)

	node2 := mock.Node()
	node2.NodePool = pool.Name
	err = state.UpsertNode(structs.MsgTypeTestSetup, 1002, node2)
	must.NoError(t, err)

	testCases := []struct {
		name     string
		pool     string
		expected []string
	}{
		{
			name: "default",
			pool: structs.NodePoolDefault,
			expected: []string{
				node1.ID,
			},
		},
		{
			name: "pool",
			pool: pool.Name,
			expected: []string{
				node2.ID,
			},
		},
		{
			name:     "empty pool",
			pool:     "",
			expected: []string{},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Create watcher to test that getters don't cause it to fire.
2125 ws := memdb.NewWatchSet() 2126 2127 iter, err := state.NodesByNodePool(ws, tc.pool) 2128 must.NoError(t, err) 2129 2130 got := []string{} 2131 for raw := iter.Next(); raw != nil; raw = iter.Next() { 2132 got = append(got, raw.(*structs.Node).ID) 2133 } 2134 2135 must.SliceContainsAll(t, tc.expected, got) 2136 must.False(t, watchFired(ws)) 2137 }) 2138 } 2139 } 2140 2141 func TestStateStore_UpsertJob_Job(t *testing.T) { 2142 ci.Parallel(t) 2143 2144 state := testStateStore(t) 2145 job := mock.Job() 2146 2147 // Create a watchset so we can test that upsert fires the watch 2148 ws := memdb.NewWatchSet() 2149 _, err := state.JobByID(ws, job.Namespace, job.ID) 2150 if err != nil { 2151 t.Fatalf("bad: %v", err) 2152 } 2153 2154 if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { 2155 t.Fatalf("err: %v", err) 2156 } 2157 if !watchFired(ws) { 2158 t.Fatalf("bad") 2159 } 2160 2161 ws = memdb.NewWatchSet() 2162 out, err := state.JobByID(ws, job.Namespace, job.ID) 2163 if err != nil { 2164 t.Fatalf("err: %v", err) 2165 } 2166 2167 if !reflect.DeepEqual(job, out) { 2168 t.Fatalf("bad: %#v %#v", job, out) 2169 } 2170 2171 index, err := state.Index("jobs") 2172 if err != nil { 2173 t.Fatalf("err: %v", err) 2174 } 2175 if index != 1000 { 2176 t.Fatalf("bad: %d", index) 2177 } 2178 2179 summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) 2180 if err != nil { 2181 t.Fatalf("err: %v", err) 2182 } 2183 if summary == nil { 2184 t.Fatalf("nil summary") 2185 } 2186 if summary.JobID != job.ID { 2187 t.Fatalf("bad summary id: %v", summary.JobID) 2188 } 2189 _, ok := summary.Summary["web"] 2190 if !ok { 2191 t.Fatalf("nil summary for task group") 2192 } 2193 if watchFired(ws) { 2194 t.Fatalf("bad") 2195 } 2196 2197 // Check the job versions 2198 allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 2199 if err != nil { 2200 t.Fatalf("err: %v", err) 2201 } 2202 if len(allVersions) != 1 { 2203 t.Fatalf("got %d; want 1", 
len(allVersions))
	}

	if a := allVersions[0]; a.ID != job.ID || a.Version != 0 {
		t.Fatalf("bad: %v", a)
	}

	// Test the looking up the job by version returns the same results
	vout, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, vout) {
		t.Fatalf("bad: %#v %#v", out, vout)
	}
}

// TestStateStore_UpdateUpsertJob_Job covers updating an existing job: index
// bookkeeping, version bump, and version history.
func TestStateStore_UpdateUpsertJob_Job(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	_, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Re-upsert the same job ID with a modified definition.
	job2 := mock.Job()
	job2.ID = job.ID
	job2.AllAtOnce = true
	err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(job2, out) {
		t.Fatalf("bad: %#v %#v", job2, out)
	}

	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1001 {
		t.Fatalf("bad: %#v", out)
	}
	if out.Version != 1 {
		t.Fatalf("bad: %#v", out)
	}

	index, err := state.Index("jobs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	// Test the looking up the job by version returns the same results
	vout, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 1)
	if err != nil {
t.Fatalf("err: %v", err) 2282 } 2283 2284 if !reflect.DeepEqual(out, vout) { 2285 t.Fatalf("bad: %#v %#v", out, vout) 2286 } 2287 2288 // Test that the job summary remains the same if the job is updated but 2289 // count remains same 2290 summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) 2291 if err != nil { 2292 t.Fatalf("err: %v", err) 2293 } 2294 if summary == nil { 2295 t.Fatalf("nil summary") 2296 } 2297 if summary.JobID != job.ID { 2298 t.Fatalf("bad summary id: %v", summary.JobID) 2299 } 2300 _, ok := summary.Summary["web"] 2301 if !ok { 2302 t.Fatalf("nil summary for task group") 2303 } 2304 2305 // Check the job versions 2306 allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 2307 if err != nil { 2308 t.Fatalf("err: %v", err) 2309 } 2310 if len(allVersions) != 2 { 2311 t.Fatalf("got %d; want 1", len(allVersions)) 2312 } 2313 2314 if a := allVersions[0]; a.ID != job.ID || a.Version != 1 || !a.AllAtOnce { 2315 t.Fatalf("bad: %+v", a) 2316 } 2317 if a := allVersions[1]; a.ID != job.ID || a.Version != 0 || a.AllAtOnce { 2318 t.Fatalf("bad: %+v", a) 2319 } 2320 2321 if watchFired(ws) { 2322 t.Fatalf("bad") 2323 } 2324 } 2325 2326 func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { 2327 ci.Parallel(t) 2328 2329 state := testStateStore(t) 2330 job := mock.PeriodicJob() 2331 2332 // Create a watchset so we can test that upsert fires the watch 2333 ws := memdb.NewWatchSet() 2334 _, err := state.JobByID(ws, job.Namespace, job.ID) 2335 if err != nil { 2336 t.Fatalf("bad: %v", err) 2337 } 2338 2339 if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil { 2340 t.Fatalf("err: %v", err) 2341 } 2342 2343 // Create a child and an evaluation 2344 job2 := job.Copy() 2345 job2.Periodic = nil 2346 job2.ID = fmt.Sprintf("%v/%s-1490635020", job.ID, structs.PeriodicLaunchSuffix) 2347 err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2) 2348 if err != nil { 2349 t.Fatalf("err: %v", err) 2350 } 2351 
2352 eval := mock.Eval() 2353 eval.JobID = job2.ID 2354 err = state.UpsertEvals(structs.MsgTypeTestSetup, 1002, []*structs.Evaluation{eval}) 2355 if err != nil { 2356 t.Fatalf("err: %v", err) 2357 } 2358 2359 job3 := job.Copy() 2360 job3.TaskGroups[0].Tasks[0].Name = "new name" 2361 err = state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3) 2362 if err != nil { 2363 t.Fatalf("err: %v", err) 2364 } 2365 2366 if !watchFired(ws) { 2367 t.Fatalf("bad") 2368 } 2369 2370 ws = memdb.NewWatchSet() 2371 out, err := state.JobByID(ws, job.Namespace, job.ID) 2372 if err != nil { 2373 t.Fatalf("err: %v", err) 2374 } 2375 2376 if s, e := out.Status, structs.JobStatusRunning; s != e { 2377 t.Fatalf("got status %v; want %v", s, e) 2378 } 2379 2380 } 2381 2382 func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { 2383 ci.Parallel(t) 2384 2385 assert := assert.New(t) 2386 state := testStateStore(t) 2387 job := mock.Job() 2388 job.Namespace = "foo" 2389 2390 err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job) 2391 assert.Contains(err.Error(), "nonexistent namespace") 2392 2393 ws := memdb.NewWatchSet() 2394 out, err := state.JobByID(ws, job.Namespace, job.ID) 2395 assert.Nil(err) 2396 assert.Nil(out) 2397 } 2398 2399 func TestStateStore_UpsertJob_NodePool(t *testing.T) { 2400 ci.Parallel(t) 2401 2402 state := testStateStore(t) 2403 2404 testCases := []struct { 2405 name string 2406 pool string 2407 expectedPool string 2408 expectedErr string 2409 }{ 2410 { 2411 name: "empty node pool uses default", 2412 pool: "", 2413 expectedPool: structs.NodePoolDefault, 2414 }, 2415 { 2416 name: "job uses pool defined", 2417 pool: structs.NodePoolDefault, 2418 expectedPool: structs.NodePoolDefault, 2419 }, 2420 { 2421 name: "error when pool doesn't exist", 2422 pool: "nonexisting", 2423 expectedErr: "nonexistent node pool", 2424 }, 2425 } 2426 2427 for i, tc := range testCases { 2428 t.Run(tc.name, func(t *testing.T) { 2429 job := mock.Job() 2430 job.NodePool = tc.pool 2431 
			err := state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, job)
			if tc.expectedErr != "" {
				must.ErrorContains(t, err, tc.expectedErr)
			} else {
				must.NoError(t, err)

				// Read the job back and confirm the pool that was persisted.
				ws := memdb.NewWatchSet()
				got, err := state.JobByID(ws, job.Namespace, job.ID)
				must.NoError(t, err)
				must.Eq(t, tc.expectedPool, got.NodePool)
			}
		})
	}
}

// Upsert a job that is the child of a parent job and ensures its summary gets
// updated.
func TestStateStore_UpsertJob_ChildJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Create a watchset so we can test that upsert fires the watch
	parent := mock.Job()
	ws := memdb.NewWatchSet()
	_, err := state.JobByID(ws, parent.Namespace, parent.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, parent); err != nil {
		t.Fatalf("err: %v", err)
	}

	// An empty Status plus a ParentID marks this job as a dispatched/periodic
	// child of parent, which is what should roll up into parent's summary.
	child := mock.Job()
	child.Status = ""
	child.ParentID = parent.ID
	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, child); err != nil {
		t.Fatalf("err: %v", err)
	}

	summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if summary == nil {
		t.Fatalf("nil summary")
	}
	if summary.JobID != parent.ID {
		t.Fatalf("bad summary id: %v", parent.ID)
	}
	if summary.Children == nil {
		t.Fatalf("nil children summary")
	}
	if summary.Children.Pending != 1 || summary.Children.Running != 0 || summary.Children.Dead != 0 {
		t.Fatalf("bad children summary: %v", summary.Children)
	}
	if !watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpsertJob_submission exercises storage and pruning of job
// submissions: a submission is retrievable by (namespace, jobID, version),
// and once more than structs.JobTrackedVersions submissions exist the oldest
// one is dropped.
func TestStateStore_UpsertJob_submission(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	job := mock.Job()
	job.Meta = map[string]string{"version": "1"}
	submission := &structs.JobSubmission{
		Source:  "source",
		Version: 0,
	}

	index := uint64(1000)

	// initially non-existent
	sub, err := state.JobSubmission(nil, job.Namespace, job.ID, 0)
	must.NoError(t, err)
	must.Nil(t, sub)

	// insert first one, version 0, index 1001
	index++
	err = state.UpsertJob(structs.JobRegisterRequestType, index, submission, job)
	must.NoError(t, err)

	// query first one, version 0
	sub, err = state.JobSubmission(nil, job.Namespace, job.ID, 0)
	must.NoError(t, err)
	must.NotNil(t, sub)
	must.Eq(t, 0, sub.Version)
	must.Eq(t, index, sub.JobModifyIndex)

	// insert 6 more, going over the limit
	// (each upsert must change the job — Meta here — to create a new version)
	for i := 1; i <= structs.JobTrackedVersions; i++ {
		index++
		job2 := job.Copy()
		job2.Meta["version"] = strconv.Itoa(i)
		sub2 := &structs.JobSubmission{
			Source:  "source",
			Version: uint64(i),
		}
		err = state.UpsertJob(structs.JobRegisterRequestType, index, sub2, job2)
		must.NoError(t, err)
	}

	// the version 0 submission is now dropped
	sub, err = state.JobSubmission(nil, job.Namespace, job.ID, 0)
	must.NoError(t, err)
	must.Nil(t, sub)

	// but we do have version 1
	sub, err = state.JobSubmission(nil, job.Namespace, job.ID, 1)
	must.NoError(t, err)
	must.NotNil(t, sub)
	must.Eq(t, 1, sub.Version)
	must.Eq(t, 1002, sub.JobModifyIndex)

	// and up to version 6
	sub, err = state.JobSubmission(nil, job.Namespace, job.ID, 6)
	must.NoError(t, err)
	must.NotNil(t, sub)
	must.Eq(t, 6, sub.Version)
	must.Eq(t, 1007, sub.JobModifyIndex)
}

// TestStateStore_UpdateUpsertJob_JobVersion checks version-history pruning:
// after 300 upserts only structs.JobTrackedVersions versions remain, and the
// stable version 0 is never pruned.
func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Create a job and mark it as stable
	job := mock.Job()
	job.Stable = true
	job.Name = "0"

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	_, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	// Upsert 299 more versions of the same job ID; Name records the sequence.
	var finalJob *structs.Job
	for i := 1; i < 300; i++ {
		finalJob = mock.Job()
		finalJob.ID = job.ID
		finalJob.Name = fmt.Sprintf("%d", i)
		err = state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, finalJob)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	ws = memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(finalJob, out) {
		t.Fatalf("bad: %#v %#v", finalJob, out)
	}

	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1299 {
		t.Fatalf("bad: %#v", out)
	}
	if out.Version != 299 {
		t.Fatalf("bad: %#v", out)
	}

	index, err := state.Index("job_version")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1299 {
		t.Fatalf("bad: %d", index)
	}

	// Check the job versions
	allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(allVersions) != structs.JobTrackedVersions {
		t.Fatalf("got %d; want %d", len(allVersions), structs.JobTrackedVersions)
	}

	if a := allVersions[0]; a.ID != job.ID || a.Version != 299 || a.Name != "299" {
		t.Fatalf("bad: %+v", a)
	}
	if a := allVersions[1]; a.ID != job.ID || a.Version != 298 || a.Name != "298" {
		t.Fatalf("bad: %+v", a)
	}

	// Ensure we didn't delete the stable job
	if a := allVersions[structs.JobTrackedVersions-1]; a.ID != job.ID ||
		a.Version != 0 || a.Name != "0" || !a.Stable {
		t.Fatalf("bad: %+v", a)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_DeleteJob_Job verifies that deleting a job removes the job,
// its summary, and its version history, and bumps the table indexes.
func TestStateStore_DeleteJob_Job(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()

	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watchset so we can test that delete fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.JobByID(ws, job.Namespace, job.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err = state.DeleteJob(1001, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out != nil {
		t.Fatalf("bad: %#v %#v", job, out)
	}

	index, err := state.Index("jobs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if summary != nil {
		t.Fatalf("expected summary to be nil, but got: %v", summary)
	}

	index, err = state.Index("job_summary")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(versions) != 0 {
		t.Fatalf("expected no job versions")
	}

	// NOTE(review): "job_summary" was already checked just above; given the
	// versions check preceding this, "job_version" may have been intended
	// here — confirm against the DeleteJob index-update path.
	index, err = state.Index("job_summary")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if
index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_DeleteJobTxn_BatchDeletes verifies that many jobs (and all
// of their versions) can be deleted inside a single write transaction and
// that the "jobs" index lands on the deletion index.
func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	const testJobCount = 10
	const jobVersionCount = 4

	stateIndex := uint64(1000)

	jobs := make([]*structs.Job, testJobCount)
	for i := 0; i < testJobCount; i++ {
		stateIndex++
		job := mock.BatchJob()

		err := state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, nil, job)
		require.NoError(t, err)

		jobs[i] = job

		// Create some versions
		for vi := 1; vi < jobVersionCount; vi++ {
			stateIndex++

			job := job.Copy()
			job.TaskGroups[0].Tasks[0].Env = map[string]string{
				"Version": fmt.Sprintf("%d", vi),
			}

			require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, stateIndex, nil, job))
		}
	}

	ws := memdb.NewWatchSet()

	// Check that jobs are present in DB
	job, err := state.JobByID(ws, jobs[0].Namespace, jobs[0].ID)
	require.NoError(t, err)
	require.Equal(t, jobs[0].ID, job.ID)

	jobVersions, err := state.JobVersionsByID(ws, jobs[0].Namespace, jobs[0].ID)
	require.NoError(t, err)
	require.Equal(t, jobVersionCount, len(jobVersions))

	// Actually delete
	const deletionIndex = uint64(10001)
	err = state.WithWriteTransaction(structs.MsgTypeTestSetup, deletionIndex, func(txn Txn) error {
		for i, job := range jobs {
			err := state.DeleteJobTxn(deletionIndex, job.Namespace, job.ID, txn)
			// Fix: %v is the correct verb for an error value; the previous
			// %e is a floating-point verb and would mangle the message.
			require.NoError(t, err, "failed at %d %v", i, err)
		}
		return nil
	})
	assert.NoError(t, err)

	assert.True(t, watchFired(ws))

	ws = memdb.NewWatchSet()
	out, err := state.JobByID(ws, jobs[0].Namespace, jobs[0].ID)
	require.NoError(t, err)
	require.Nil(t, out)

	jobVersions, err = state.JobVersionsByID(ws, jobs[0].Namespace, jobs[0].ID)
	require.NoError(t, err)
	require.Empty(t, jobVersions)

	index, err := state.Index("jobs")
	require.NoError(t, err)
	require.Equal(t, deletionIndex, index)
}

// TestStateStore_DeleteJob_MultipleVersions verifies that deleting a job with
// many tracked versions removes the job, its summary, and every version, and
// bumps the relevant table indexes.
func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	assert := assert.New(t)

	// Create a job and mark it as stable
	job := mock.Job()
	job.Stable = true
	job.Priority = 0

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	_, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
	assert.Nil(err)
	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job))
	assert.True(watchFired(ws))

	var finalJob *structs.Job
	for i := 1; i < 20; i++ {
		finalJob = mock.Job()
		finalJob.ID = job.ID
		finalJob.Priority = i
		assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, finalJob))
	}

	assert.Nil(state.DeleteJob(1020, job.Namespace, job.ID))
	assert.True(watchFired(ws))

	ws = memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	assert.Nil(err)
	assert.Nil(out)

	index, err := state.Index("jobs")
	assert.Nil(err)
	assert.EqualValues(1020, index)

	summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID)
	assert.Nil(err)
	assert.Nil(summary)

	index, err = state.Index("job_version")
	assert.Nil(err)
	assert.EqualValues(1020, index)

	versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
	assert.Nil(err)
	assert.Len(versions, 0)

	index, err = state.Index("job_summary")
	assert.Nil(err)
	assert.EqualValues(1020, index)

	assert.False(watchFired(ws))
}

// TestStateStore_DeleteJob_ChildJob verifies that deleting a child job rolls
// the child up into the parent's children summary as dead.
func TestStateStore_DeleteJob_ChildJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	parent := mock.Job()
	if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil {
		t.Fatalf("err: %v", err)
	}

	child := mock.Job()
	child.Status = ""
	child.ParentID = parent.ID

	if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watchset so we can test that delete fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err := state.DeleteJob(1001, child.Namespace, child.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if summary == nil {
		t.Fatalf("nil summary")
	}
	if summary.JobID != parent.ID {
		t.Fatalf("bad summary id: %v", parent.ID)
	}
	if summary.Children == nil {
		t.Fatalf("nil children summary")
	}
	if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 {
		t.Fatalf("bad children summary: %v", summary.Children)
	}
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_Jobs verifies iteration over all stored jobs.
func TestStateStore_Jobs(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var jobs []*structs.Job

	for i := 0; i < 10; i++ {
		job := mock.Job()
		jobs = append(jobs, job)

		err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	ws := memdb.NewWatchSet()
	iter, err := state.Jobs(ws)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out []*structs.Job
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*structs.Job))
	}

	sort.Sort(JobIDSort(jobs))
	sort.Sort(JobIDSort(out))

	if !reflect.DeepEqual(jobs, out) {
		t.Fatalf("bad: %#v %#v", jobs, out)
	}
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_JobVersions verifies iteration over all job versions.
func TestStateStore_JobVersions(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var jobs []*structs.Job

	for i := 0; i < 10; i++ {
		job := mock.Job()
		jobs = append(jobs, job)

		err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	ws := memdb.NewWatchSet()
	iter, err := state.JobVersions(ws)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out []*structs.Job
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*structs.Job))
	}

	sort.Sort(JobIDSort(jobs))
	sort.Sort(JobIDSort(out))

	if !reflect.DeepEqual(jobs, out) {
		t.Fatalf("bad: %#v %#v", jobs, out)
	}
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_JobsByIDPrefix verifies prefix lookups of job IDs.
func TestStateStore_JobsByIDPrefix(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()

	job.ID = "redis"
	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	iter, err := state.JobsByIDPrefix(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	gatherJobs := func(iter memdb.ResultIterator) []*structs.Job {
		var jobs []*structs.Job
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			jobs = append(jobs, raw.(*structs.Job))
		}
		return jobs
	}

	jobs := gatherJobs(iter)
	if len(jobs) != 1 {
		t.Fatalf("err: %v", err)
	}

	iter, err = state.JobsByIDPrefix(ws, job.Namespace, "re")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	jobs = gatherJobs(iter)
	if len(jobs) != 1 {
		t.Fatalf("err: %v", err)
	}
	if watchFired(ws) {
		t.Fatalf("bad")
	}

	job = mock.Job()
	job.ID = "riak"
	err = state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	iter, err = state.JobsByIDPrefix(ws, job.Namespace, "r")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	jobs = gatherJobs(iter)
	if len(jobs) != 2 {
		t.Fatalf("err: %v", err)
	}

	iter, err = state.JobsByIDPrefix(ws, job.Namespace, "ri")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	jobs = gatherJobs(iter)
	if len(jobs) != 1 {
		t.Fatalf("err: %v", err)
	}
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_JobsByIDPrefix_Namespaces verifies that prefix lookups are
// namespace-scoped: the same ID in two namespaces resolves independently.
func TestStateStore_JobsByIDPrefix_Namespaces(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job1 := mock.Job()
	job2 := mock.Job()

	ns1 := mock.Namespace()
	ns1.Name = "namespace1"
	ns2 := mock.Namespace()
	ns2.Name = "namespace2"

	jobID := "redis"
	job1.ID = jobID
	job2.ID = jobID
	job1.Namespace = ns1.Name
	job2.Namespace = ns2.Name

	require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2}))
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1))
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2))

	gatherJobs := func(iter memdb.ResultIterator) []*structs.Job {
		var jobs []*structs.Job
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			jobs = append(jobs, raw.(*structs.Job))
		}
		return jobs
	}

	// Try full match
	ws := memdb.NewWatchSet()
	iter1, err := state.JobsByIDPrefix(ws, ns1.Name, jobID)
	require.NoError(t, err)
	iter2, err := state.JobsByIDPrefix(ws, ns2.Name, jobID)
	require.NoError(t, err)

	jobsNs1 := gatherJobs(iter1)
	require.Len(t, jobsNs1, 1)

	jobsNs2 := gatherJobs(iter2)
	require.Len(t, jobsNs2, 1)

	// Try prefix
	iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "re")
	require.NoError(t, err)
	iter2, err = state.JobsByIDPrefix(ws, ns2.Name, "re")
	require.NoError(t, err)

	jobsNs1 = gatherJobs(iter1)
	jobsNs2 = gatherJobs(iter2)
	require.Len(t, jobsNs1, 1)
	require.Len(t, jobsNs2, 1)

	job3 := mock.Job()
	job3.ID = "riak"
	job3.Namespace = ns1.Name
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3))
	require.True(t, watchFired(ws))

	ws = memdb.NewWatchSet()
	iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "r")
	require.NoError(t, err)
	iter2, err = state.JobsByIDPrefix(ws, ns2.Name, "r")
	require.NoError(t, err)

	jobsNs1 = gatherJobs(iter1)
	jobsNs2 = gatherJobs(iter2)
	require.Len(t, jobsNs1, 2)
	require.Len(t, jobsNs2, 1)

	iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "ri")
	require.NoError(t, err)

	jobsNs1 = gatherJobs(iter1)
	require.Len(t, jobsNs1, 1)
	require.False(t, watchFired(ws))
}

// TestStateStore_JobsByNamespace verifies listing jobs per namespace.
func TestStateStore_JobsByNamespace(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	ns1 := mock.Namespace()
	ns1.Name = "new"
	job1 := mock.Job()
	job2 := mock.Job()
	job1.Namespace = ns1.Name
	job2.Namespace = ns1.Name

	ns2 := mock.Namespace()
	ns2.Name = "new-namespace"
	job3 := mock.Job()
	job4 := mock.Job()
	job3.Namespace = ns2.Name
	job4.Namespace = ns2.Name

	require.NoError(t, state.UpsertNamespaces(998,
[]*structs.Namespace{ns1, ns2})) 3182 3183 // Create watchsets so we can test that update fires the watch 3184 watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} 3185 _, err := state.JobsByNamespace(watches[0], ns1.Name) 3186 require.NoError(t, err) 3187 _, err = state.JobsByNamespace(watches[1], ns2.Name) 3188 require.NoError(t, err) 3189 3190 require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job1)) 3191 require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1002, nil, job2)) 3192 require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, job3)) 3193 require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, nil, job4)) 3194 require.True(t, watchFired(watches[0])) 3195 require.True(t, watchFired(watches[1])) 3196 3197 ws := memdb.NewWatchSet() 3198 iter1, err := state.JobsByNamespace(ws, ns1.Name) 3199 require.NoError(t, err) 3200 iter2, err := state.JobsByNamespace(ws, ns2.Name) 3201 require.NoError(t, err) 3202 3203 var out1 []*structs.Job 3204 for { 3205 raw := iter1.Next() 3206 if raw == nil { 3207 break 3208 } 3209 out1 = append(out1, raw.(*structs.Job)) 3210 } 3211 3212 var out2 []*structs.Job 3213 for { 3214 raw := iter2.Next() 3215 if raw == nil { 3216 break 3217 } 3218 out2 = append(out2, raw.(*structs.Job)) 3219 } 3220 3221 require.Len(t, out1, 2) 3222 require.Len(t, out2, 2) 3223 3224 for _, job := range out1 { 3225 require.Equal(t, ns1.Name, job.Namespace) 3226 } 3227 for _, job := range out2 { 3228 require.Equal(t, ns2.Name, job.Namespace) 3229 } 3230 3231 index, err := state.Index("jobs") 3232 require.NoError(t, err) 3233 require.EqualValues(t, 1004, index) 3234 require.False(t, watchFired(ws)) 3235 } 3236 3237 func TestStateStore_JobsByPeriodic(t *testing.T) { 3238 ci.Parallel(t) 3239 3240 state := testStateStore(t) 3241 var periodic, nonPeriodic []*structs.Job 3242 3243 for i := 0; i < 10; i++ { 3244 job := mock.Job() 3245 nonPeriodic = append(nonPeriodic, job) 3246 3247 err := 
state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) 3248 if err != nil { 3249 t.Fatalf("err: %v", err) 3250 } 3251 } 3252 3253 for i := 0; i < 10; i++ { 3254 job := mock.PeriodicJob() 3255 periodic = append(periodic, job) 3256 3257 err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), nil, job) 3258 if err != nil { 3259 t.Fatalf("err: %v", err) 3260 } 3261 } 3262 3263 ws := memdb.NewWatchSet() 3264 iter, err := state.JobsByPeriodic(ws, true) 3265 if err != nil { 3266 t.Fatalf("err: %v", err) 3267 } 3268 3269 var outPeriodic []*structs.Job 3270 for { 3271 raw := iter.Next() 3272 if raw == nil { 3273 break 3274 } 3275 outPeriodic = append(outPeriodic, raw.(*structs.Job)) 3276 } 3277 3278 iter, err = state.JobsByPeriodic(ws, false) 3279 if err != nil { 3280 t.Fatalf("err: %v", err) 3281 } 3282 3283 var outNonPeriodic []*structs.Job 3284 for { 3285 raw := iter.Next() 3286 if raw == nil { 3287 break 3288 } 3289 outNonPeriodic = append(outNonPeriodic, raw.(*structs.Job)) 3290 } 3291 3292 sort.Sort(JobIDSort(periodic)) 3293 sort.Sort(JobIDSort(nonPeriodic)) 3294 sort.Sort(JobIDSort(outPeriodic)) 3295 sort.Sort(JobIDSort(outNonPeriodic)) 3296 3297 if !reflect.DeepEqual(periodic, outPeriodic) { 3298 t.Fatalf("bad: %#v %#v", periodic, outPeriodic) 3299 } 3300 3301 if !reflect.DeepEqual(nonPeriodic, outNonPeriodic) { 3302 t.Fatalf("bad: %#v %#v", nonPeriodic, outNonPeriodic) 3303 } 3304 if watchFired(ws) { 3305 t.Fatalf("bad") 3306 } 3307 } 3308 3309 func TestStateStore_JobsByScheduler(t *testing.T) { 3310 ci.Parallel(t) 3311 3312 state := testStateStore(t) 3313 var serviceJobs []*structs.Job 3314 var sysJobs []*structs.Job 3315 3316 for i := 0; i < 10; i++ { 3317 job := mock.Job() 3318 serviceJobs = append(serviceJobs, job) 3319 3320 err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job) 3321 if err != nil { 3322 t.Fatalf("err: %v", err) 3323 } 3324 } 3325 3326 for i := 0; i < 10; i++ { 3327 job := mock.SystemJob() 3328 
job.Status = structs.JobStatusRunning 3329 sysJobs = append(sysJobs, job) 3330 3331 err := state.UpsertJob(structs.MsgTypeTestSetup, 2000+uint64(i), nil, job) 3332 if err != nil { 3333 t.Fatalf("err: %v", err) 3334 } 3335 } 3336 3337 ws := memdb.NewWatchSet() 3338 iter, err := state.JobsByScheduler(ws, "service") 3339 if err != nil { 3340 t.Fatalf("err: %v", err) 3341 } 3342 3343 var outService []*structs.Job 3344 for { 3345 raw := iter.Next() 3346 if raw == nil { 3347 break 3348 } 3349 outService = append(outService, raw.(*structs.Job)) 3350 } 3351 3352 iter, err = state.JobsByScheduler(ws, "system") 3353 if err != nil { 3354 t.Fatalf("err: %v", err) 3355 } 3356 3357 var outSystem []*structs.Job 3358 for { 3359 raw := iter.Next() 3360 if raw == nil { 3361 break 3362 } 3363 outSystem = append(outSystem, raw.(*structs.Job)) 3364 } 3365 3366 sort.Sort(JobIDSort(serviceJobs)) 3367 sort.Sort(JobIDSort(sysJobs)) 3368 sort.Sort(JobIDSort(outService)) 3369 sort.Sort(JobIDSort(outSystem)) 3370 3371 if !reflect.DeepEqual(serviceJobs, outService) { 3372 t.Fatalf("bad: %#v %#v", serviceJobs, outService) 3373 } 3374 3375 if !reflect.DeepEqual(sysJobs, outSystem) { 3376 t.Fatalf("bad: %#v %#v", sysJobs, outSystem) 3377 } 3378 if watchFired(ws) { 3379 t.Fatalf("bad") 3380 } 3381 } 3382 3383 func TestStateStore_JobsByGC(t *testing.T) { 3384 ci.Parallel(t) 3385 3386 state := testStateStore(t) 3387 gc, nonGc := make(map[string]struct{}), make(map[string]struct{}) 3388 3389 for i := 0; i < 20; i++ { 3390 var job *structs.Job 3391 if i%2 == 0 { 3392 job = mock.Job() 3393 } else { 3394 job = mock.PeriodicJob() 3395 } 3396 nonGc[job.ID] = struct{}{} 3397 3398 if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000+uint64(i), nil, job); err != nil { 3399 t.Fatalf("err: %v", err) 3400 } 3401 } 3402 3403 for i := 0; i < 20; i += 2 { 3404 job := mock.Job() 3405 job.Type = structs.JobTypeBatch 3406 gc[job.ID] = struct{}{} 3407 3408 if err := state.UpsertJob(structs.MsgTypeTestSetup, 
2000+uint64(i), nil, job); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Create an eval for it
		eval := mock.Eval()
		eval.JobID = job.ID
		eval.Status = structs.EvalStatusComplete
		if err := state.UpsertEvals(structs.MsgTypeTestSetup, 2000+uint64(i+1), []*structs.Evaluation{eval}); err != nil {
			t.Fatalf("err: %v", err)
		}

	}

	ws := memdb.NewWatchSet()
	iter, err := state.JobsByGC(ws, true)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Collect the set of job IDs the GC-eligible iterator returns.
	outGc := make(map[string]struct{})
	for i := iter.Next(); i != nil; i = iter.Next() {
		j := i.(*structs.Job)
		outGc[j.ID] = struct{}{}
	}

	// And the set of jobs not eligible for GC.
	iter, err = state.JobsByGC(ws, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	outNonGc := make(map[string]struct{})
	for i := iter.Next(); i != nil; i = iter.Next() {
		j := i.(*structs.Job)
		outNonGc[j.ID] = struct{}{}
	}

	if !reflect.DeepEqual(gc, outGc) {
		t.Fatalf("bad: %#v %#v", gc, outGc)
	}

	if !reflect.DeepEqual(nonGc, outNonGc) {
		t.Fatalf("bad: %#v %#v", nonGc, outNonGc)
	}
	// Read-only queries above must not have fired any watches.
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpsertPeriodicLaunch verifies that inserting a new periodic
// launch fires the watch, is readable back with matching Create/ModifyIndex,
// and bumps the periodic_launch table index.
func TestStateStore_UpsertPeriodicLaunch(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()
	launch := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    time.Now(),
	}

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err := state.UpsertPeriodicLaunch(1000, launch)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// New insert: both indexes must equal the upsert index.
	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}

	if !reflect.DeepEqual(launch, out) {
		t.Fatalf("bad: %#v %#v", job, out)
	}

	index, err := state.Index("periodic_launch")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpdateUpsertPeriodicLaunch verifies that re-upserting an
// existing launch fires the watch and updates ModifyIndex while preserving
// CreateIndex.
func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()
	launch := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    time.Now(),
	}

	err := state.UpsertPeriodicLaunch(1000, launch)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Second launch for the same job/namespace, one second later.
	launch2 := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    launch.Launch.Add(1 * time.Second),
	}
	err = state.UpsertPeriodicLaunch(1001, launch2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// CreateIndex is preserved from the original insert; ModifyIndex moves.
	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1001 {
		t.Fatalf("bad: %#v", out)
	}

	if !reflect.DeepEqual(launch2, out) {
		t.Fatalf("bad: %#v %#v", launch2, out)
	}

	index, err := state.Index("periodic_launch")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_DeletePeriodicLaunch verifies that deleting a launch fires
// the watch, removes the record, and bumps the table index.
func TestStateStore_DeletePeriodicLaunch(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()
	launch := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    time.Now(),
	}

	err := state.UpsertPeriodicLaunch(1000, launch)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watchset so we can test that delete fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err = state.DeletePeriodicLaunch(1001, launch.Namespace, launch.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// The launch must be gone after the delete.
	if out != nil {
		t.Fatalf("bad: %#v %#v", job, out)
	}

	index, err := state.Index("periodic_launch")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_PeriodicLaunches verifies that the PeriodicLaunches iterator
// returns every upserted launch exactly once with no duplicates or leftovers.
func TestStateStore_PeriodicLaunches(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var launches []*structs.PeriodicLaunch

	for i := 0; i < 10; i++ {
		job := mock.Job()
		launch := &structs.PeriodicLaunch{
			ID:        job.ID,
			Namespace: job.Namespace,
			Launch:    time.Now(),
		}
		launches = append(launches, launch)

		err := state.UpsertPeriodicLaunch(1000+uint64(i), launch)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	ws := memdb.NewWatchSet()
	iter, err := state.PeriodicLaunches(ws)
	if 
err != nil {
		t.Fatalf("err: %v", err)
	}

	// Drain the iterator into a map keyed by launch ID, rejecting duplicates.
	out := make(map[string]*structs.PeriodicLaunch, 10)
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		launch := raw.(*structs.PeriodicLaunch)
		if _, ok := out[launch.ID]; ok {
			t.Fatalf("duplicate: %v", launch.ID)
		}

		out[launch.ID] = launch
	}

	// Every inserted launch must be present and equal; remove as we match.
	for _, launch := range launches {
		l, ok := out[launch.ID]
		if !ok {
			t.Fatalf("bad %v", launch.ID)
		}

		if !reflect.DeepEqual(launch, l) {
			t.Fatalf("bad: %#v %#v", launch, l)
		}

		delete(out, launch.ID)
	}

	// Anything left over was returned by the iterator but never inserted.
	if len(out) != 0 {
		t.Fatalf("leftover: %#v", out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_CSIVolume checks register, list and deregister for csi_volumes
func TestStateStore_CSIVolume(t *testing.T) {
	// Consistency fix: every other test in this file calls ci.Parallel; this
	// one was missing it.
	ci.Parallel(t)

	state := testStateStore(t)
	index := uint64(1000)

	// Volume IDs
	vol0, vol1 := uuid.Generate(), uuid.Generate()

	// Create a node running a healthy instance of the plugin
	node := mock.Node()
	pluginID := "minnie"
	alloc := mock.Alloc()
	alloc.DesiredStatus = "run"
	alloc.ClientStatus = "running"
	alloc.NodeID = node.ID
	alloc.Job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
		"foo": {
			Name:   "foo",
			Source: vol0,
			Type:   "csi",
		},
	}

	node.CSINodePlugins = map[string]*structs.CSIInfo{
		pluginID: {
			PluginID:                 pluginID,
			AllocID:                  alloc.ID,
			Healthy:                  true,
			HealthDescription:        "healthy",
			RequiresControllerPlugin: false,
			RequiresTopologies:       false,
			NodeInfo: &structs.CSINodeInfo{
				ID:                      node.ID,
				MaxVolumes:              64,
				RequiresNodeStageVolume: true,
			},
		},
	}

	index++
	err := state.UpsertNode(structs.MsgTypeTestSetup, index, node)
	require.NoError(t, err)
	defer state.DeleteNode(structs.MsgTypeTestSetup, 9999, []string{pluginID})

	index++
	err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc})
	require.NoError(t, err)

	ns := structs.DefaultNamespace

	// Two schedulable volumes, served by different plugins.
	v0 := structs.NewCSIVolume("foo", index)
	v0.ID = vol0
	v0.Namespace = ns
	v0.PluginID = "minnie"
	v0.Schedulable = true
	v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
	v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
	v0.RequestedCapabilities = []*structs.CSIVolumeCapability{{
		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
	}}

	index++
	v1 := structs.NewCSIVolume("foo", index)
	v1.ID = vol1
	v1.Namespace = ns
	v1.PluginID = "adam"
	v1.Schedulable = true
	v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
	v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
	v1.RequestedCapabilities = []*structs.CSIVolumeCapability{{
		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
	}}

	index++
	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1})
	require.NoError(t, err)

	// volume registration is idempotent, unless identies are changed
	index++
	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1})
	require.NoError(t, err)

	// Re-registering the same volume ID under a different plugin is rejected.
	index++
	v2 := v0.Copy()
	v2.PluginID = "new-id"
	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v2})
	require.Error(t, err, fmt.Sprintf("volume exists: %s", v0.ID))

	ws := memdb.NewWatchSet()
	iter, err := state.CSIVolumesByNamespace(ws, ns, "")
	require.NoError(t, err)

	// slurp drains a volume iterator into a slice.
	slurp := func(iter memdb.ResultIterator) (vs []*structs.CSIVolume) {
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			vol := raw.(*structs.CSIVolume)
			vs = append(vs, vol)
		}
		return vs
	}

	vs := slurp(iter)
	require.Equal(t, 2, len(vs))

	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie")
	require.NoError(t, err)
	vs = slurp(iter)
	require.Equal(t, 1, len(vs))

	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByNodeID(ws, "", node.ID)
	require.NoError(t, err)
	vs = slurp(iter)
	require.Equal(t, 1, len(vs))

	// Allocs
	a0 := mock.Alloc()
	a1 := mock.Alloc()
	index++
	err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{a0, a1})
	require.NoError(t, err)

	// Claims
	r := structs.CSIVolumeClaimRead
	w := structs.CSIVolumeClaimWrite
	u := structs.CSIVolumeClaimGC
	claim0 := &structs.CSIVolumeClaim{
		AllocationID: a0.ID,
		NodeID:       node.ID,
		Mode:         r,
	}
	claim1 := &structs.CSIVolumeClaim{
		AllocationID: a1.ID,
		NodeID:       node.ID,
		Mode:         w,
	}

	index++
	err = state.CSIVolumeClaim(index, ns, vol0, claim0)
	require.NoError(t, err)
	index++
	err = state.CSIVolumeClaim(index, ns, vol0, claim1)
	require.NoError(t, err)

	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie")
	require.NoError(t, err)
	vs = slurp(iter)
	require.False(t, vs[0].HasFreeWriteClaims())

	claim2 := new(structs.CSIVolumeClaim)
	*claim2 = *claim0
	claim2.Mode = u
	// NOTE(review): the literal index 2 here (rather than index++) rewinds
	// the raft index for this claim — presumably deliberate, but confirm;
	// every other state mutation in this test uses the incrementing index.
	err = state.CSIVolumeClaim(2, ns, vol0, claim2)
	require.NoError(t, err)
	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie")
	require.NoError(t, err)
	vs = slurp(iter)
	require.True(t, vs[0].ReadSchedulable())

	// registration is an error when the volume is in use
	index++
	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0})
	require.Error(t, 
err, "volume re-registered while in use")
	// as is deregistration
	index++
	err = state.CSIVolumeDeregister(index, ns, []string{vol0}, false)
	require.Error(t, err, "volume deregistered while in use")

	// even if forced, because we have a non-terminal claim
	index++
	err = state.CSIVolumeDeregister(index, ns, []string{vol0}, true)
	require.Error(t, err, "volume force deregistered while in use")

	// we use the ID, not a prefix
	index++
	err = state.CSIVolumeDeregister(index, ns, []string{"fo"}, true)
	require.Error(t, err, "volume deregistered by prefix")

	// release claims to unblock deregister
	index++
	claim3 := new(structs.CSIVolumeClaim)
	*claim3 = *claim2
	claim3.State = structs.CSIVolumeClaimStateReadyToFree
	err = state.CSIVolumeClaim(index, ns, vol0, claim3)
	require.NoError(t, err)
	index++
	claim1.Mode = u
	claim1.State = structs.CSIVolumeClaimStateReadyToFree
	err = state.CSIVolumeClaim(index, ns, vol0, claim1)
	require.NoError(t, err)

	index++
	err = state.CSIVolumeDeregister(index, ns, []string{vol0}, false)
	require.NoError(t, err)

	// List, now omitting the deregistered volume
	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByPluginID(ws, ns, "", "minnie")
	require.NoError(t, err)
	vs = slurp(iter)
	require.Equal(t, 0, len(vs))

	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByNamespace(ws, ns, "")
	require.NoError(t, err)
	vs = slurp(iter)
	require.Equal(t, 1, len(vs))
}

// TestStateStore_CSIPlugin_Lifecycle walks a CSI plugin through registration,
// allocation placement, fingerprinting, node drain, and deregistration.
func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) {
	ci.Parallel(t)

	store := testStateStore(t)
	plugID := "foo"
	var err error
	var controllerJobID string
	var nodeJobID string
	allocIDs := []string{}

	// pluginCounts captures the expected fingerprint/health/expected counts
	// asserted after each lifecycle phase.
	type pluginCounts struct {
		controllerFingerprints int
		nodeFingerprints       int
		controllersHealthy     int
		nodesHealthy           int
		controllersExpected    int
		nodesExpected          int
	}

	// helper function for test assertions
	checkPlugin := func(counts pluginCounts) *structs.CSIPlugin {
		plug, err := store.CSIPluginByID(memdb.NewWatchSet(), plugID)
		require.NotNil(t, plug, "plugin was nil")
		require.NoError(t, err)
		require.Equal(t, counts.controllerFingerprints, len(plug.Controllers), "controllers fingerprinted")
		require.Equal(t, counts.nodeFingerprints, len(plug.Nodes), "nodes fingerprinted")
		require.Equal(t, counts.controllersHealthy, plug.ControllersHealthy, "controllers healthy")
		require.Equal(t, counts.nodesHealthy, plug.NodesHealthy, "nodes healthy")
		require.Equal(t, counts.controllersExpected, plug.ControllersExpected, "controllers expected")
		require.Equal(t, counts.nodesExpected, plug.NodesExpected, "nodes expected")
		return plug.Copy()
	}

	type allocUpdateKind int
	const (
		SERVER allocUpdateKind = iota
		CLIENT
	)

	// helper function calling client-side update with
	// UpsertAllocs and/or UpdateAllocsFromClient, depending on which
	// status(es) are set
	updateAllocsFn := func(allocIDs []string, kind allocUpdateKind,
		transform func(alloc *structs.Allocation)) []*structs.Allocation {
		allocs := []*structs.Allocation{}
		ws := memdb.NewWatchSet()
		for _, id := range allocIDs {
			alloc, err := store.AllocByID(ws, id)
			require.NoError(t, err)
			alloc = alloc.Copy()
			transform(alloc)
			allocs = append(allocs, alloc)
		}
		switch kind {
		case SERVER:
			err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), allocs)
		case CLIENT:
			// this is somewhat artificial b/c we get alloc updates
			// from multiple nodes concurrently but not in a single
			// RPC call. But this guarantees we'll trigger any nested
			// transaction setup bugs
			err = store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIndex(store), allocs)
		}
		require.NoError(t, err)
		return allocs
	}

	// helper function calling UpsertNode for fingerprinting
	updateNodeFn := func(nodeID string, transform func(node *structs.Node)) {
		ws := memdb.NewWatchSet()
		node, _ := store.NodeByID(ws, nodeID)
		node = node.Copy()
		transform(node)
		err = store.UpsertNode(structs.MsgTypeTestSetup, nextIndex(store), node)
		require.NoError(t, err)
	}

	nodes := []*structs.Node{mock.Node(), mock.Node(), mock.Node()}
	for _, node := range nodes {
		err = store.UpsertNode(structs.MsgTypeTestSetup, nextIndex(store), node)
		require.NoError(t, err)
	}

	// Note: these are all subtests for clarity but are expected to be
	// ordered, because they walk through all the phases of plugin
	// instance registration and deregistration

	t.Run("register plugin jobs", func(t *testing.T) {

		controllerJob := mock.CSIPluginJob(structs.CSIPluginTypeController, plugID)
		controllerJobID = controllerJob.ID
		err = store.UpsertJob(structs.MsgTypeTestSetup, nextIndex(store), nil, controllerJob)
		// Fix: these upsert errors were assigned but never checked, which
		// would let a failed setup slip through silently.
		require.NoError(t, err)

		nodeJob := mock.CSIPluginJob(structs.CSIPluginTypeNode, plugID)
		nodeJobID = nodeJob.ID
		err = store.UpsertJob(structs.MsgTypeTestSetup, nextIndex(store), nil, nodeJob)
		require.NoError(t, err)

		// plugins created, but no fingerprints or allocs yet
		// note: there's no job summary yet, but we know the task
		// group count for the non-system job
		//
		// TODO: that's the current code but we really should be able
		// to figure out the system jobs too
		plug := checkPlugin(pluginCounts{
			controllerFingerprints: 0,
			nodeFingerprints:       0,
			controllersHealthy:     0,
			nodesHealthy:           0,
			controllersExpected:    2,
			nodesExpected:          0,
		})
		require.False(t, plug.ControllerRequired)
	})

	t.Run("plan apply upserts allocations", func(t *testing.T) {

		// allocForJob builds a pending alloc for the given plugin job.
		allocForJob := func(job *structs.Job) *structs.Allocation {
			alloc := mock.Alloc()
			alloc.Job = job.Copy()
			alloc.JobID = job.ID
			alloc.TaskGroup = job.TaskGroups[0].Name
			alloc.DesiredStatus = structs.AllocDesiredStatusRun
			alloc.ClientStatus = structs.AllocClientStatusPending
			return alloc
		}

		ws := memdb.NewWatchSet()
		controllerJob, _ := store.JobByID(ws, structs.DefaultNamespace, controllerJobID)
		controllerAlloc0 := allocForJob(controllerJob)
		controllerAlloc0.NodeID = nodes[0].ID
		allocIDs = append(allocIDs, controllerAlloc0.ID)

		controllerAlloc1 := allocForJob(controllerJob)
		controllerAlloc1.NodeID = nodes[1].ID
		allocIDs = append(allocIDs, controllerAlloc1.ID)

		allocs := []*structs.Allocation{controllerAlloc0, controllerAlloc1}

		// One node-plugin alloc per node.
		nodeJob, _ := store.JobByID(ws, structs.DefaultNamespace, nodeJobID)
		for _, node := range nodes {
			nodeAlloc := allocForJob(nodeJob)
			nodeAlloc.NodeID = node.ID
			allocIDs = append(allocIDs, nodeAlloc.ID)
			allocs = append(allocs, nodeAlloc)
		}
		err = store.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(store), allocs)
		require.NoError(t, err)

		// node plugin now has expected counts too
		plug := checkPlugin(pluginCounts{
			controllerFingerprints: 0,
			nodeFingerprints:       0,
			controllersHealthy:     0,
			nodesHealthy:           0,
			controllersExpected:    2,
			nodesExpected:          3,
		})
		require.False(t, plug.ControllerRequired)
	})

	t.Run("client upserts alloc status", func(t *testing.T) {

		updateAllocsFn(allocIDs, CLIENT, func(alloc *structs.Allocation) {
			alloc.ClientStatus = structs.AllocClientStatusRunning
		})

		// plugin still has allocs but no fingerprints
		plug := checkPlugin(pluginCounts{
			controllerFingerprints: 0,
			nodeFingerprints:       0,
			controllersHealthy:     0,
			nodesHealthy:           0,
			controllersExpected:    2,
			nodesExpected:          3,
		})
		require.False(t, plug.ControllerRequired)
	})

	t.Run("client upserts node fingerprints", func(t *testing.T) {

		// All three nodes fingerprint a healthy node-plugin instance.
		nodeFingerprint := map[string]*structs.CSIInfo{
			plugID: {
				PluginID:                 plugID,
				Healthy:                  true,
				UpdateTime:               time.Now(),
				RequiresControllerPlugin: true,
				RequiresTopologies:       false,
				NodeInfo:                 &structs.CSINodeInfo{},
			},
		}
		for _, node := range nodes {
			updateNodeFn(node.ID, func(node *structs.Node) {
				node.CSINodePlugins = nodeFingerprint
			})
		}

		// Only the first two nodes run controller instances.
		controllerFingerprint := map[string]*structs.CSIInfo{
			plugID: {
				PluginID:                 plugID,
				Healthy:                  true,
				UpdateTime:               time.Now(),
				RequiresControllerPlugin: true,
				RequiresTopologies:       false,
				ControllerInfo: &structs.CSIControllerInfo{
					SupportsReadOnlyAttach: true,
					SupportsListVolumes:    true,
				},
			},
		}
		for n := 0; n < 2; n++ {
			updateNodeFn(nodes[n].ID, func(node *structs.Node) {
				node.CSIControllerPlugins = controllerFingerprint
			})
		}

		// plugins have been fingerprinted so we have healthy counts
		plug := checkPlugin(pluginCounts{
			controllerFingerprints: 2,
			nodeFingerprints:       3,
			controllersHealthy:     2,
			nodesHealthy:           3,
			controllersExpected:    2,
			nodesExpected:          3,
		})
		require.True(t, plug.ControllerRequired)
	})

	t.Run("node marked for drain", func(t *testing.T) {
		ws := memdb.NewWatchSet()
		nodeAllocs, err := store.AllocsByNode(ws, nodes[0].ID)
		require.NoError(t, err)
		require.Len(t, nodeAllocs, 2)

		// Server-side stop of both allocs on the drained node.
		updateAllocsFn([]string{nodeAllocs[0].ID, nodeAllocs[1].ID},
			SERVER, func(alloc *structs.Allocation) {
				alloc.DesiredStatus = structs.AllocDesiredStatusStop
			})

		plug := checkPlugin(pluginCounts{
			controllerFingerprints: 2,
			nodeFingerprints:       3,
			controllersHealthy:     2,
			nodesHealthy:           3,
			controllersExpected:    2, // job summary hasn't changed
			nodesExpected:          3, // job summary hasn't changed
		})
		require.True(t, plug.ControllerRequired)
	})

	t.Run("client removes fingerprints after node drain", func(t *testing.T) {
		updateNodeFn(nodes[0].ID, func(node *structs.Node) {
			node.CSIControllerPlugins = nil
			node.CSINodePlugins = nil
		})

		plug := checkPlugin(pluginCounts{
			controllerFingerprints: 1,
			nodeFingerprints:       2,
			controllersHealthy:     1,
			nodesHealthy:           2,
			controllersExpected:    2,
			nodesExpected:          3,
		})
		require.True(t, plug.ControllerRequired)
	})

	t.Run("client updates alloc status to stopped after node drain", func(t *testing.T) {
		nodeAllocs, err := store.AllocsByNode(memdb.NewWatchSet(), nodes[0].ID)
		require.NoError(t, err)
		require.Len(t, nodeAllocs, 2)

		updateAllocsFn([]string{nodeAllocs[0].ID, nodeAllocs[1].ID}, CLIENT,
			func(alloc *structs.Allocation) {
				alloc.ClientStatus = structs.AllocClientStatusComplete
			})

		plug := checkPlugin(pluginCounts{
			controllerFingerprints: 1,
			nodeFingerprints:       2,
			controllersHealthy:     1,
			nodesHealthy:           2,
			controllersExpected:    2, // still 2 because count=2
			nodesExpected:          2, // has to use nodes we're actually placed on
		})
		require.True(t, plug.ControllerRequired)
	})

	t.Run("job stop with purge", func(t *testing.T) {

		// A volume registered against the plugin must survive plugin deletion.
		vol := &structs.CSIVolume{
			ID:        uuid.Generate(),
			Namespace: structs.DefaultNamespace,
			PluginID:  plugID,
		}
		err = store.UpsertCSIVolume(nextIndex(store), []*structs.CSIVolume{vol})
		require.NoError(t, err)

		err = store.DeleteJob(nextIndex(store), structs.DefaultNamespace, controllerJobID)
		require.NoError(t, err)

		err = store.DeleteJob(nextIndex(store), structs.DefaultNamespace, nodeJobID)
		require.NoError(t, err)

		plug := checkPlugin(pluginCounts{
			controllerFingerprints: 1, // no changes till we get fingerprint
			nodeFingerprints:       2,
			controllersHealthy:     1,
			nodesHealthy:           2,
			controllersExpected:    0,
			nodesExpected:          0,
		})
		require.True(t, plug.ControllerRequired)
		require.False(t, plug.IsEmpty())

		updateAllocsFn(allocIDs, SERVER,
			func(alloc *structs.Allocation) {
				alloc.DesiredStatus = structs.AllocDesiredStatusStop
			})

		updateAllocsFn(allocIDs, CLIENT,
			func(alloc *structs.Allocation) {
				alloc.ClientStatus = structs.AllocClientStatusComplete
			})

		plug = checkPlugin(pluginCounts{
			controllerFingerprints: 1,
			nodeFingerprints:       2,
			controllersHealthy:     1,
			nodesHealthy:           2,
			controllersExpected:    0,
			nodesExpected:          0,
		})
		require.True(t, plug.ControllerRequired)
		require.False(t, plug.IsEmpty())

		for _, node := range nodes {
			updateNodeFn(node.ID, func(node *structs.Node) {
				node.CSIControllerPlugins = nil
			})
		}

		plug = checkPlugin(pluginCounts{
			controllerFingerprints: 0,
			nodeFingerprints:       2, // haven't removed fingerprints yet
			controllersHealthy:     0,
			nodesHealthy:           2,
			controllersExpected:    0,
			nodesExpected:          0,
		})
		require.True(t, plug.ControllerRequired)
		require.False(t, plug.IsEmpty())

		for _, node := range nodes {
			updateNodeFn(node.ID, func(node *structs.Node) {
				node.CSINodePlugins = nil
			})
		}

		// With all fingerprints gone the plugin record is garbage collected.
		ws := memdb.NewWatchSet()
		plug, err := store.CSIPluginByID(ws, plugID)
		require.NoError(t, err)
		require.Nil(t, plug, "plugin was not deleted")

		vol, err = store.CSIVolumeByID(ws, vol.Namespace, vol.ID)
		require.NoError(t, err)
		require.NotNil(t, vol, "volume should be queryable even if plugin is deleted")
		require.False(t, vol.Schedulable)
	})
}

// TestStateStore_Indexes verifies that the Indexes iterator reports the
// expected table index entry after an upsert.
func TestStateStore_Indexes(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	node := mock.Node()

	err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	iter, err := state.Indexes()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out []*IndexEntry
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*IndexEntry))
	}

	expect := &IndexEntry{"nodes", 1000}
	if l := len(out); l < 1 {
		t.Fatalf("unexpected number of index entries: %v", pretty.Sprint(out))
	}

	// Scan for the "nodes" entry; its value must match the upsert index.
	for _, index := range out {
		if index.Key != expect.Key {
			continue
		}
		if index.Value != expect.Value {
			t.Fatalf("bad index; got %d; want %d", index.Value, expect.Value)
		}

		// We matched
		return
	}

	t.Fatal("did not find expected index entry")
}

// TestStateStore_LatestIndex verifies that LatestIndex returns the highest
// raft index applied across all tables.
func TestStateStore_LatestIndex(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	if err := state.UpsertNode(structs.MsgTypeTestSetup, 1000, mock.Node()); err != nil {
		t.Fatalf("err: %v", err)
	}

	exp := uint64(2000)
	if err := state.UpsertJob(structs.MsgTypeTestSetup, exp, nil, mock.Job()); err != nil {
		t.Fatalf("err: %v", err)
	}

	latest, err := state.LatestIndex()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if latest != exp {
		t.Fatalf("LatestIndex() returned %d; want %d", latest, exp)
	}
}

// TestStateStore_UpsertEvals_Eval verifies that inserting an eval fires the
// watch, is readable back, and bumps the evals table index.
func TestStateStore_UpsertEvals_Eval(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	eval := mock.Eval()

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.EvalByID(ws, eval.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(eval, out) {
		t.Fatalf("bad: %#v %#v", eval, out)
	}

	index, err := state.Index("evals")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpsertEvals_CancelBlocked verifies that upserting a complete
// eval for a job cancels that job's existing blocked evals.
func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Create two blocked evals for the same job
	j := "test-job"
	b1, b2 := mock.Eval(), mock.Eval()
	b1.JobID = j
	b1.Status = structs.EvalStatusBlocked
	b2.JobID = j
	b2.Status = structs.EvalStatusBlocked

	err := state.UpsertEvals(structs.MsgTypeTestSetup, 999, []*structs.Evaluation{b1, b2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create one complete and successful eval for the job
	eval := mock.Eval()
	eval.JobID = j
	eval.Status = structs.EvalStatusComplete

	// Create a watchset so we can test that the upsert of the complete eval
	// fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.EvalByID(ws, b1.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.EvalByID(ws, b2.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(eval, out) {
		t.Fatalf("bad: %#v %#v", eval, out)
	}

	index, err := state.Index("evals")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	// Get b1/b2 and check they are cancelled
	out1, err := state.EvalByID(ws, b1.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out2, err := state.EvalByID(ws, b2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out1.Status != structs.EvalStatusCancelled || out2.Status != structs.EvalStatusCancelled {
		t.Fatalf("bad: %#v %#v", out1, out2)
	}

	// Cancellation reason should reference the superseding eval.
	if !strings.Contains(out1.StatusDescription, eval.ID) || !strings.Contains(out2.StatusDescription, eval.ID) {
		t.Fatalf("bad status description %#v %#v", out1, out2)
	}

	if out1.ModifyTime != eval.ModifyTime || out2.ModifyTime != eval.ModifyTime {
		t.Fatalf("bad modify time %#v %#v", out1, out2)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpsertEvals_Namespace verifies that evals are partitioned by
// namespace and that namespace-scoped watches fire on upsert.
func TestStateStore_UpsertEvals_Namespace(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	ns1 := mock.Namespace()
	ns1.Name = "new"
	eval1 := mock.Eval()
	eval2 := mock.Eval()
	eval1.Namespace = ns1.Name
	eval2.Namespace = ns1.Name

	ns2 := mock.Namespace()
	ns2.Name = "new-namespace"
	eval3 := mock.Eval()
	eval4 := mock.Eval()
	eval3.Namespace = ns2.Name
	eval4.Namespace = ns2.Name

	require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2}))

	// Create watchsets so we can test that update fires the watch
	watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()}
	_, err := state.EvalsByNamespace(watches[0], ns1.Name)
	require.NoError(t, err)
	_, err = state.EvalsByNamespace(watches[1], ns2.Name)
	require.NoError(t, 
err)

	require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval1, eval2, eval3, eval4}))
	require.True(t, watchFired(watches[0]))
	require.True(t, watchFired(watches[1]))

	ws := memdb.NewWatchSet()
	iter1, err := state.EvalsByNamespace(ws, ns1.Name)
	require.NoError(t, err)
	iter2, err := state.EvalsByNamespace(ws, ns2.Name)
	require.NoError(t, err)

	// Drain both namespace iterators.
	var out1 []*structs.Evaluation
	for {
		raw := iter1.Next()
		if raw == nil {
			break
		}
		out1 = append(out1, raw.(*structs.Evaluation))
	}

	var out2 []*structs.Evaluation
	for {
		raw := iter2.Next()
		if raw == nil {
			break
		}
		out2 = append(out2, raw.(*structs.Evaluation))
	}

	require.Len(t, out1, 2)
	require.Len(t, out2, 2)

	// Each iterator must only return evals from its own namespace.
	for _, eval := range out1 {
		require.Equal(t, ns1.Name, eval.Namespace)
	}
	for _, eval := range out2 {
		require.Equal(t, ns2.Name, eval.Namespace)
	}

	index, err := state.Index("evals")
	require.NoError(t, err)
	require.EqualValues(t, 1001, index)
	require.False(t, watchFired(ws))
}

// TestStateStore_Update_UpsertEvals_Eval verifies that re-upserting an eval
// with the same ID updates ModifyIndex while preserving CreateIndex, and
// fires both the by-ID and by-job watches.
func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	eval := mock.Eval()

	err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watchset so we can test that delete fires the watch
	ws := memdb.NewWatchSet()
	ws2 := memdb.NewWatchSet()
	if _, err := state.EvalByID(ws, eval.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	if _, err := state.EvalsByJob(ws2, eval.Namespace, eval.JobID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Replacement eval with the same ID and job.
	eval2 := mock.Eval()
	eval2.ID = eval.ID
	eval2.JobID = eval.JobID
	err = state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}
	if !watchFired(ws2) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(eval2, out) {
		t.Fatalf("bad: %#v %#v", eval2, out)
	}

	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1001 {
		t.Fatalf("bad: %#v", out)
	}

	index, err := state.Index("evals")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpsertEvals_Eval_ChildJob verifies that upserting a terminal
// eval for a child (parameterized/periodic) job updates the parent job's
// children summary.
func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	parent := mock.Job()
	if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil {
		t.Fatalf("err: %v", err)
	}

	child := mock.Job()
	child.Status = ""
	child.ParentID = parent.ID

	if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil {
		t.Fatalf("err: %v", err)
	}

	eval := mock.Eval()
	eval.Status = structs.EvalStatusComplete
	eval.JobID = child.ID

	// Create watchsets so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	ws2 := memdb.NewWatchSet()
	ws3 := memdb.NewWatchSet()
	if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.EvalByID(ws2, eval.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.EvalsByJob(ws3, eval.Namespace, eval.JobID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}
	if !watchFired(ws2) {
		t.Fatalf("bad")
	}
	if !watchFired(ws3) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(eval, out) {
		t.Fatalf("bad: %#v %#v", eval, out)
	}

	index, err := state.Index("evals")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	// The complete eval for the child marks it dead in the parent's summary.
	summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if summary == nil {
		t.Fatalf("nil summary")
	}
	if summary.JobID != parent.ID {
		t.Fatalf("bad summary id: %v", parent.ID)
	}
	if summary.Children == nil {
		t.Fatalf("nil children summary")
	}
	if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 {
		t.Fatalf("bad children summary: %v", summary.Children)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_DeleteEval_Eval verifies that deleting evals and allocs
// fires every related watch and removes the records.
func TestStateStore_DeleteEval_Eval(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	eval1 := mock.Eval()
	eval2 := mock.Eval()
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()

	// Create watchsets so we can test that upsert fires the watch
	watches := make([]memdb.WatchSet, 12)
	for i := 0; i < 12; i++ {
		watches[i] = memdb.NewWatchSet()
	}
	if _, err := state.EvalByID(watches[0], eval1.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.EvalByID(watches[1], eval2.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.EvalsByJob(watches[2], eval1.Namespace, eval1.JobID); err != nil {
		t.Fatalf("bad: 
%v", err) 4732 } 4733 if _, err := state.EvalsByJob(watches[3], eval2.Namespace, eval2.JobID); err != nil { 4734 t.Fatalf("bad: %v", err) 4735 } 4736 if _, err := state.AllocByID(watches[4], alloc1.ID); err != nil { 4737 t.Fatalf("bad: %v", err) 4738 } 4739 if _, err := state.AllocByID(watches[5], alloc2.ID); err != nil { 4740 t.Fatalf("bad: %v", err) 4741 } 4742 if _, err := state.AllocsByEval(watches[6], alloc1.EvalID); err != nil { 4743 t.Fatalf("bad: %v", err) 4744 } 4745 if _, err := state.AllocsByEval(watches[7], alloc2.EvalID); err != nil { 4746 t.Fatalf("bad: %v", err) 4747 } 4748 if _, err := state.AllocsByJob(watches[8], alloc1.Namespace, alloc1.JobID, false); err != nil { 4749 t.Fatalf("bad: %v", err) 4750 } 4751 if _, err := state.AllocsByJob(watches[9], alloc2.Namespace, alloc2.JobID, false); err != nil { 4752 t.Fatalf("bad: %v", err) 4753 } 4754 if _, err := state.AllocsByNode(watches[10], alloc1.NodeID); err != nil { 4755 t.Fatalf("bad: %v", err) 4756 } 4757 if _, err := state.AllocsByNode(watches[11], alloc2.NodeID); err != nil { 4758 t.Fatalf("bad: %v", err) 4759 } 4760 4761 state.UpsertJobSummary(900, mock.JobSummary(eval1.JobID)) 4762 state.UpsertJobSummary(901, mock.JobSummary(eval2.JobID)) 4763 state.UpsertJobSummary(902, mock.JobSummary(alloc1.JobID)) 4764 state.UpsertJobSummary(903, mock.JobSummary(alloc2.JobID)) 4765 err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2}) 4766 if err != nil { 4767 t.Fatalf("err: %v", err) 4768 } 4769 4770 err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2}) 4771 if err != nil { 4772 t.Fatalf("err: %v", err) 4773 } 4774 4775 err = state.DeleteEval(1002, []string{eval1.ID, eval2.ID}, []string{alloc1.ID, alloc2.ID}, false) 4776 if err != nil { 4777 t.Fatalf("err: %v", err) 4778 } 4779 4780 for i, ws := range watches { 4781 if !watchFired(ws) { 4782 t.Fatalf("bad %d", i) 4783 } 4784 } 4785 4786 ws := memdb.NewWatchSet() 4787 out, 
err := state.EvalByID(ws, eval1.ID) 4788 if err != nil { 4789 t.Fatalf("err: %v", err) 4790 } 4791 4792 if out != nil { 4793 t.Fatalf("bad: %#v %#v", eval1, out) 4794 } 4795 4796 out, err = state.EvalByID(ws, eval2.ID) 4797 if err != nil { 4798 t.Fatalf("err: %v", err) 4799 } 4800 4801 if out != nil { 4802 t.Fatalf("bad: %#v %#v", eval1, out) 4803 } 4804 4805 outA, err := state.AllocByID(ws, alloc1.ID) 4806 if err != nil { 4807 t.Fatalf("err: %v", err) 4808 } 4809 4810 if out != nil { 4811 t.Fatalf("bad: %#v %#v", alloc1, outA) 4812 } 4813 4814 outA, err = state.AllocByID(ws, alloc2.ID) 4815 if err != nil { 4816 t.Fatalf("err: %v", err) 4817 } 4818 4819 if out != nil { 4820 t.Fatalf("bad: %#v %#v", alloc1, outA) 4821 } 4822 4823 index, err := state.Index("evals") 4824 if err != nil { 4825 t.Fatalf("err: %v", err) 4826 } 4827 if index != 1002 { 4828 t.Fatalf("bad: %d", index) 4829 } 4830 4831 index, err = state.Index("allocs") 4832 if err != nil { 4833 t.Fatalf("err: %v", err) 4834 } 4835 if index != 1002 { 4836 t.Fatalf("bad: %d", index) 4837 } 4838 4839 if watchFired(ws) { 4840 t.Fatalf("bad") 4841 } 4842 4843 // Call the eval delete function with zero length eval and alloc ID arrays. 4844 // This should result in the table indexes both staying the same, rather 4845 // than updating without cause. 
4846 require.NoError(t, state.DeleteEval(1010, []string{}, []string{}, false)) 4847 4848 allocsIndex, err := state.Index("allocs") 4849 require.NoError(t, err) 4850 require.Equal(t, uint64(1002), allocsIndex) 4851 4852 evalsIndex, err := state.Index("evals") 4853 require.NoError(t, err) 4854 require.Equal(t, uint64(1002), evalsIndex) 4855 } 4856 4857 func TestStateStore_DeleteEval_ChildJob(t *testing.T) { 4858 ci.Parallel(t) 4859 4860 state := testStateStore(t) 4861 4862 parent := mock.Job() 4863 if err := state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent); err != nil { 4864 t.Fatalf("err: %v", err) 4865 } 4866 4867 child := mock.Job() 4868 child.Status = "" 4869 child.ParentID = parent.ID 4870 4871 if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child); err != nil { 4872 t.Fatalf("err: %v", err) 4873 } 4874 4875 eval1 := mock.Eval() 4876 eval1.JobID = child.ID 4877 alloc1 := mock.Alloc() 4878 alloc1.JobID = child.ID 4879 4880 err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1}) 4881 if err != nil { 4882 t.Fatalf("err: %v", err) 4883 } 4884 4885 err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1}) 4886 if err != nil { 4887 t.Fatalf("err: %v", err) 4888 } 4889 4890 // Create watchsets so we can test that delete fires the watch 4891 ws := memdb.NewWatchSet() 4892 if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { 4893 t.Fatalf("bad: %v", err) 4894 } 4895 4896 err = state.DeleteEval(1002, []string{eval1.ID}, []string{alloc1.ID}, false) 4897 if err != nil { 4898 t.Fatalf("err: %v", err) 4899 } 4900 4901 if !watchFired(ws) { 4902 t.Fatalf("bad") 4903 } 4904 4905 ws = memdb.NewWatchSet() 4906 summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) 4907 if err != nil { 4908 t.Fatalf("err: %v", err) 4909 } 4910 if summary == nil { 4911 t.Fatalf("nil summary") 4912 } 4913 if summary.JobID != parent.ID { 4914 t.Fatalf("bad summary id: %v", 
parent.ID) 4915 } 4916 if summary.Children == nil { 4917 t.Fatalf("nil children summary") 4918 } 4919 if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { 4920 t.Fatalf("bad children summary: %v", summary.Children) 4921 } 4922 4923 if watchFired(ws) { 4924 t.Fatalf("bad") 4925 } 4926 } 4927 4928 func TestStateStore_DeleteEval_UserInitiated(t *testing.T) { 4929 ci.Parallel(t) 4930 4931 testState := testStateStore(t) 4932 4933 // Upsert a scheduler config object, so we have something to check and 4934 // modify. 4935 schedulerConfig := structs.SchedulerConfiguration{PauseEvalBroker: false} 4936 require.NoError(t, testState.SchedulerSetConfig(10, &schedulerConfig)) 4937 4938 // Generate some mock evals and upsert these into state. 4939 mockEval1 := mock.Eval() 4940 mockEval2 := mock.Eval() 4941 require.NoError(t, testState.UpsertEvals( 4942 structs.MsgTypeTestSetup, 20, []*structs.Evaluation{mockEval1, mockEval2})) 4943 4944 mockEvalIDs := []string{mockEval1.ID, mockEval2.ID} 4945 4946 // Try and delete the evals without pausing the eval broker. 4947 err := testState.DeleteEval(30, mockEvalIDs, []string{}, true) 4948 require.ErrorContains(t, err, "eval broker is enabled") 4949 4950 // Pause the eval broker on the scheduler config, and try deleting the 4951 // evals again. 
4952 schedulerConfig.PauseEvalBroker = true 4953 require.NoError(t, testState.SchedulerSetConfig(30, &schedulerConfig)) 4954 4955 require.NoError(t, testState.DeleteEval(40, mockEvalIDs, []string{}, true)) 4956 4957 ws := memdb.NewWatchSet() 4958 mockEval1Lookup, err := testState.EvalByID(ws, mockEval1.ID) 4959 require.NoError(t, err) 4960 require.Nil(t, mockEval1Lookup) 4961 4962 mockEval2Lookup, err := testState.EvalByID(ws, mockEval1.ID) 4963 require.NoError(t, err) 4964 require.Nil(t, mockEval2Lookup) 4965 } 4966 4967 // TestStateStore_DeleteEvalsByFilter_Pagination tests the pagination logic for 4968 // deleting evals by filter; the business logic is tested more fully in the eval 4969 // endpoint tests. 4970 func TestStateStore_DeleteEvalsByFilter_Pagination(t *testing.T) { 4971 4972 evalCount := 100 4973 index := uint64(100) 4974 4975 store := testStateStore(t) 4976 4977 // Create a set of pending evaluations 4978 4979 schedulerConfig := &structs.SchedulerConfiguration{ 4980 PauseEvalBroker: true, 4981 CreateIndex: index, 4982 ModifyIndex: index, 4983 } 4984 must.NoError(t, store.SchedulerSetConfig(index, schedulerConfig)) 4985 4986 evals := []*structs.Evaluation{} 4987 for i := 0; i < evalCount; i++ { 4988 mockEval := mock.Eval() 4989 evals = append(evals, mockEval) 4990 } 4991 index++ 4992 must.NoError(t, store.UpsertEvals( 4993 structs.MsgTypeTestSetup, index, evals)) 4994 4995 // Delete one page 4996 index++ 4997 must.NoError(t, store.DeleteEvalsByFilter(index, "JobID != \"\"", "", 10)) 4998 4999 countRemaining := func() (string, int) { 5000 lastSeen := "" 5001 remaining := 0 5002 5003 iter, err := store.Evals(nil, SortDefault) 5004 must.NoError(t, err) 5005 for { 5006 raw := iter.Next() 5007 if raw == nil { 5008 break 5009 } 5010 eval := raw.(*structs.Evaluation) 5011 lastSeen = eval.ID 5012 remaining++ 5013 } 5014 return lastSeen, remaining 5015 } 5016 5017 lastSeen, remaining := countRemaining() 5018 must.Eq(t, 90, remaining) 5019 5020 // Delete 
starting from lastSeen, which should only delete 1 5021 index++ 5022 must.NoError(t, store.DeleteEvalsByFilter(index, "JobID != \"\"", lastSeen, 10)) 5023 5024 _, remaining = countRemaining() 5025 must.Eq(t, 89, remaining) 5026 } 5027 5028 func TestStateStore_EvalIsUserDeleteSafe(t *testing.T) { 5029 ci.Parallel(t) 5030 5031 testCases := []struct { 5032 inputAllocs []*structs.Allocation 5033 inputJob *structs.Job 5034 expectedResult bool 5035 name string 5036 }{ 5037 { 5038 inputAllocs: nil, 5039 inputJob: nil, 5040 expectedResult: true, 5041 name: "job not in state", 5042 }, 5043 { 5044 inputAllocs: nil, 5045 inputJob: &structs.Job{Status: structs.JobStatusDead}, 5046 expectedResult: true, 5047 name: "job stopped", 5048 }, 5049 { 5050 inputAllocs: nil, 5051 inputJob: &structs.Job{Stop: true}, 5052 expectedResult: true, 5053 name: "job dead", 5054 }, 5055 { 5056 inputAllocs: []*structs.Allocation{}, 5057 inputJob: &structs.Job{Status: structs.JobStatusRunning}, 5058 expectedResult: true, 5059 name: "no allocs for eval", 5060 }, 5061 { 5062 inputAllocs: []*structs.Allocation{ 5063 {ClientStatus: structs.AllocClientStatusComplete}, 5064 {ClientStatus: structs.AllocClientStatusRunning}, 5065 }, 5066 inputJob: &structs.Job{Status: structs.JobStatusRunning}, 5067 expectedResult: false, 5068 name: "running alloc for eval", 5069 }, 5070 { 5071 inputAllocs: []*structs.Allocation{ 5072 {ClientStatus: structs.AllocClientStatusComplete}, 5073 {ClientStatus: structs.AllocClientStatusUnknown}, 5074 }, 5075 inputJob: &structs.Job{Status: structs.JobStatusRunning}, 5076 expectedResult: false, 5077 name: "unknown alloc for eval", 5078 }, 5079 { 5080 inputAllocs: []*structs.Allocation{ 5081 {ClientStatus: structs.AllocClientStatusComplete}, 5082 {ClientStatus: structs.AllocClientStatusLost}, 5083 }, 5084 inputJob: &structs.Job{Status: structs.JobStatusRunning}, 5085 expectedResult: true, 5086 name: "complete and lost allocs for eval", 5087 }, 5088 { 5089 inputAllocs: 
[]*structs.Allocation{ 5090 { 5091 ClientStatus: structs.AllocClientStatusFailed, 5092 TaskGroup: "test", 5093 }, 5094 }, 5095 inputJob: &structs.Job{ 5096 Status: structs.JobStatusPending, 5097 TaskGroups: []*structs.TaskGroup{ 5098 { 5099 Name: "test", 5100 ReschedulePolicy: nil, 5101 }, 5102 }, 5103 }, 5104 expectedResult: true, 5105 name: "failed alloc job without reschedule", 5106 }, 5107 { 5108 inputAllocs: []*structs.Allocation{ 5109 { 5110 ClientStatus: structs.AllocClientStatusFailed, 5111 TaskGroup: "test", 5112 }, 5113 }, 5114 inputJob: &structs.Job{ 5115 Status: structs.JobStatusPending, 5116 TaskGroups: []*structs.TaskGroup{ 5117 { 5118 Name: "test", 5119 ReschedulePolicy: &structs.ReschedulePolicy{ 5120 Unlimited: false, 5121 Attempts: 0, 5122 }, 5123 }, 5124 }, 5125 }, 5126 expectedResult: true, 5127 name: "failed alloc job reschedule disabled", 5128 }, 5129 { 5130 inputAllocs: []*structs.Allocation{ 5131 { 5132 ClientStatus: structs.AllocClientStatusFailed, 5133 TaskGroup: "test", 5134 }, 5135 }, 5136 inputJob: &structs.Job{ 5137 Status: structs.JobStatusPending, 5138 TaskGroups: []*structs.TaskGroup{ 5139 { 5140 Name: "test", 5141 ReschedulePolicy: &structs.ReschedulePolicy{ 5142 Unlimited: false, 5143 Attempts: 3, 5144 }, 5145 }, 5146 }, 5147 }, 5148 expectedResult: false, 5149 name: "failed alloc next alloc not set", 5150 }, 5151 { 5152 inputAllocs: []*structs.Allocation{ 5153 { 5154 ClientStatus: structs.AllocClientStatusFailed, 5155 TaskGroup: "test", 5156 NextAllocation: "4aa4930a-8749-c95b-9c67-5ef29b0fc653", 5157 }, 5158 }, 5159 inputJob: &structs.Job{ 5160 Status: structs.JobStatusPending, 5161 TaskGroups: []*structs.TaskGroup{ 5162 { 5163 Name: "test", 5164 ReschedulePolicy: &structs.ReschedulePolicy{ 5165 Unlimited: false, 5166 Attempts: 3, 5167 }, 5168 }, 5169 }, 5170 }, 5171 expectedResult: false, 5172 name: "failed alloc next alloc set", 5173 }, 5174 { 5175 inputAllocs: []*structs.Allocation{ 5176 { 5177 ClientStatus: 
structs.AllocClientStatusFailed, 5178 TaskGroup: "test", 5179 }, 5180 }, 5181 inputJob: &structs.Job{ 5182 Status: structs.JobStatusPending, 5183 TaskGroups: []*structs.TaskGroup{ 5184 { 5185 Name: "test", 5186 ReschedulePolicy: &structs.ReschedulePolicy{ 5187 Unlimited: true, 5188 }, 5189 }, 5190 }, 5191 }, 5192 expectedResult: false, 5193 name: "failed alloc job reschedule unlimited", 5194 }, 5195 } 5196 5197 for _, tc := range testCases { 5198 t.Run(tc.name, func(t *testing.T) { 5199 actualResult := isEvalDeleteSafe(tc.inputAllocs, tc.inputJob) 5200 require.Equal(t, tc.expectedResult, actualResult) 5201 }) 5202 } 5203 } 5204 5205 func TestStateStore_EvalsByJob(t *testing.T) { 5206 ci.Parallel(t) 5207 5208 state := testStateStore(t) 5209 5210 eval1 := mock.Eval() 5211 eval2 := mock.Eval() 5212 eval2.JobID = eval1.JobID 5213 eval3 := mock.Eval() 5214 evals := []*structs.Evaluation{eval1, eval2} 5215 5216 err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, evals) 5217 if err != nil { 5218 t.Fatalf("err: %v", err) 5219 } 5220 err = state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval3}) 5221 if err != nil { 5222 t.Fatalf("err: %v", err) 5223 } 5224 5225 ws := memdb.NewWatchSet() 5226 out, err := state.EvalsByJob(ws, eval1.Namespace, eval1.JobID) 5227 if err != nil { 5228 t.Fatalf("err: %v", err) 5229 } 5230 5231 sort.Sort(EvalIDSort(evals)) 5232 sort.Sort(EvalIDSort(out)) 5233 5234 if !reflect.DeepEqual(evals, out) { 5235 t.Fatalf("bad: %#v %#v", evals, out) 5236 } 5237 5238 if watchFired(ws) { 5239 t.Fatalf("bad") 5240 } 5241 } 5242 5243 func TestStateStore_Evals(t *testing.T) { 5244 ci.Parallel(t) 5245 5246 state := testStateStore(t) 5247 var evals []*structs.Evaluation 5248 5249 for i := 0; i < 10; i++ { 5250 eval := mock.Eval() 5251 evals = append(evals, eval) 5252 5253 err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000+uint64(i), []*structs.Evaluation{eval}) 5254 if err != nil { 5255 t.Fatalf("err: %v", err) 5256 } 5257 } 5258 
5259 ws := memdb.NewWatchSet() 5260 iter, err := state.Evals(ws, false) 5261 if err != nil { 5262 t.Fatalf("err: %v", err) 5263 } 5264 5265 var out []*structs.Evaluation 5266 for { 5267 raw := iter.Next() 5268 if raw == nil { 5269 break 5270 } 5271 out = append(out, raw.(*structs.Evaluation)) 5272 } 5273 5274 sort.Sort(EvalIDSort(evals)) 5275 sort.Sort(EvalIDSort(out)) 5276 5277 if !reflect.DeepEqual(evals, out) { 5278 t.Fatalf("bad: %#v %#v", evals, out) 5279 } 5280 5281 if watchFired(ws) { 5282 t.Fatalf("bad") 5283 } 5284 } 5285 5286 func TestStateStore_EvalsByIDPrefix(t *testing.T) { 5287 ci.Parallel(t) 5288 5289 state := testStateStore(t) 5290 var evals []*structs.Evaluation 5291 5292 ids := []string{ 5293 "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", 5294 "aaaaaaab-7bfb-395d-eb95-0685af2176b2", 5295 "aaaaaabb-7bfb-395d-eb95-0685af2176b2", 5296 "aaaaabbb-7bfb-395d-eb95-0685af2176b2", 5297 "aaaabbbb-7bfb-395d-eb95-0685af2176b2", 5298 "aaabbbbb-7bfb-395d-eb95-0685af2176b2", 5299 "aabbbbbb-7bfb-395d-eb95-0685af2176b2", 5300 "abbbbbbb-7bfb-395d-eb95-0685af2176b2", 5301 "bbbbbbbb-7bfb-395d-eb95-0685af2176b2", 5302 } 5303 for i := 0; i < 9; i++ { 5304 eval := mock.Eval() 5305 eval.ID = ids[i] 5306 evals = append(evals, eval) 5307 } 5308 5309 err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, evals) 5310 if err != nil { 5311 t.Fatalf("err: %v", err) 5312 } 5313 5314 gatherEvals := func(iter memdb.ResultIterator) []*structs.Evaluation { 5315 var evals []*structs.Evaluation 5316 for { 5317 raw := iter.Next() 5318 if raw == nil { 5319 break 5320 } 5321 evals = append(evals, raw.(*structs.Evaluation)) 5322 } 5323 return evals 5324 } 5325 5326 t.Run("list by prefix", func(t *testing.T) { 5327 ws := memdb.NewWatchSet() 5328 iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortDefault) 5329 require.NoError(t, err) 5330 5331 got := []string{} 5332 for _, e := range gatherEvals(iter) { 5333 got = append(got, e.ID) 5334 } 5335 5336 expected := []string{ 
5337 "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", 5338 "aaaaaaab-7bfb-395d-eb95-0685af2176b2", 5339 "aaaaaabb-7bfb-395d-eb95-0685af2176b2", 5340 "aaaaabbb-7bfb-395d-eb95-0685af2176b2", 5341 "aaaabbbb-7bfb-395d-eb95-0685af2176b2", 5342 } 5343 require.Len(t, got, 5, "expected five evaluations") 5344 require.Equal(t, expected, got) // Must be in this order. 5345 }) 5346 5347 t.Run("invalid prefix", func(t *testing.T) { 5348 ws := memdb.NewWatchSet() 5349 iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb", SortDefault) 5350 require.NoError(t, err) 5351 5352 out := gatherEvals(iter) 5353 require.Len(t, out, 0, "expected zero evaluations") 5354 require.False(t, watchFired(ws)) 5355 }) 5356 5357 t.Run("reverse order", func(t *testing.T) { 5358 ws := memdb.NewWatchSet() 5359 iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortReverse) 5360 require.NoError(t, err) 5361 5362 got := []string{} 5363 for _, e := range gatherEvals(iter) { 5364 got = append(got, e.ID) 5365 } 5366 5367 expected := []string{ 5368 "aaaabbbb-7bfb-395d-eb95-0685af2176b2", 5369 "aaaaabbb-7bfb-395d-eb95-0685af2176b2", 5370 "aaaaaabb-7bfb-395d-eb95-0685af2176b2", 5371 "aaaaaaab-7bfb-395d-eb95-0685af2176b2", 5372 "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", 5373 } 5374 require.Len(t, got, 5, "expected five evaluations") 5375 require.Equal(t, expected, got) // Must be in this order. 
5376 }) 5377 } 5378 5379 func TestStateStore_EvalsByIDPrefix_Namespaces(t *testing.T) { 5380 ci.Parallel(t) 5381 5382 state := testStateStore(t) 5383 eval1 := mock.Eval() 5384 eval1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2" 5385 eval2 := mock.Eval() 5386 eval2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2" 5387 sharedPrefix := "aabb" 5388 5389 ns1 := mock.Namespace() 5390 ns1.Name = "namespace1" 5391 ns2 := mock.Namespace() 5392 ns2.Name = "namespace2" 5393 eval1.Namespace = ns1.Name 5394 eval2.Namespace = ns2.Name 5395 5396 require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) 5397 require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2})) 5398 5399 gatherEvals := func(iter memdb.ResultIterator) []*structs.Evaluation { 5400 var evals []*structs.Evaluation 5401 for { 5402 raw := iter.Next() 5403 if raw == nil { 5404 break 5405 } 5406 evals = append(evals, raw.(*structs.Evaluation)) 5407 } 5408 return evals 5409 } 5410 5411 ws := memdb.NewWatchSet() 5412 iter1, err := state.EvalsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault) 5413 require.NoError(t, err) 5414 iter2, err := state.EvalsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault) 5415 require.NoError(t, err) 5416 iter3, err := state.EvalsByIDPrefix(ws, structs.AllNamespacesSentinel, sharedPrefix, SortDefault) 5417 require.NoError(t, err) 5418 5419 evalsNs1 := gatherEvals(iter1) 5420 evalsNs2 := gatherEvals(iter2) 5421 evalsNs3 := gatherEvals(iter3) 5422 require.Len(t, evalsNs1, 1) 5423 require.Len(t, evalsNs2, 1) 5424 require.Len(t, evalsNs3, 2) 5425 5426 iter1, err = state.EvalsByIDPrefix(ws, ns1.Name, eval1.ID[:8], SortDefault) 5427 require.NoError(t, err) 5428 5429 evalsNs1 = gatherEvals(iter1) 5430 require.Len(t, evalsNs1, 1) 5431 require.False(t, watchFired(ws)) 5432 } 5433 5434 func TestStateStore_EvalsRelatedToID(t *testing.T) { 5435 ci.Parallel(t) 5436 5437 state := testStateStore(t) 5438 5439 // Create sample evals. 
5440 e1 := mock.Eval() 5441 e2 := mock.Eval() 5442 e3 := mock.Eval() 5443 e4 := mock.Eval() 5444 e5 := mock.Eval() 5445 e6 := mock.Eval() 5446 5447 // Link evals. 5448 // This is not accurate for a real scenario, but it's helpful for testing 5449 // the general approach. 5450 // 5451 // e1 -> e2 -> e3 -> e5 5452 // └─-> e4 (blocked) -> e6 5453 e1.NextEval = e2.ID 5454 e2.PreviousEval = e1.ID 5455 5456 e2.NextEval = e3.ID 5457 e3.PreviousEval = e2.ID 5458 5459 e3.BlockedEval = e4.ID 5460 e4.PreviousEval = e3.ID 5461 5462 e3.NextEval = e5.ID 5463 e5.PreviousEval = e3.ID 5464 5465 e4.NextEval = e6.ID 5466 e6.PreviousEval = e4.ID 5467 5468 // Create eval not in chain. 5469 e7 := mock.Eval() 5470 5471 // Create eval with GC'ed related eval. 5472 e8 := mock.Eval() 5473 e8.NextEval = uuid.Generate() 5474 5475 err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{e1, e2, e3, e4, e5, e6, e7, e8}) 5476 require.NoError(t, err) 5477 5478 testCases := []struct { 5479 name string 5480 id string 5481 expected []string 5482 }{ 5483 { 5484 name: "linear history", 5485 id: e1.ID, 5486 expected: []string{ 5487 e2.ID, 5488 e3.ID, 5489 e4.ID, 5490 e5.ID, 5491 e6.ID, 5492 }, 5493 }, 5494 { 5495 name: "linear history from middle", 5496 id: e4.ID, 5497 expected: []string{ 5498 e1.ID, 5499 e2.ID, 5500 e3.ID, 5501 e5.ID, 5502 e6.ID, 5503 }, 5504 }, 5505 { 5506 name: "eval not in chain", 5507 id: e7.ID, 5508 expected: []string{}, 5509 }, 5510 { 5511 name: "eval with gc", 5512 id: e8.ID, 5513 expected: []string{}, 5514 }, 5515 { 5516 name: "non-existing eval", 5517 id: uuid.Generate(), 5518 expected: []string{}, 5519 }, 5520 } 5521 5522 for _, tc := range testCases { 5523 t.Run(tc.name, func(t *testing.T) { 5524 ws := memdb.NewWatchSet() 5525 related, err := state.EvalsRelatedToID(ws, tc.id) 5526 require.NoError(t, err) 5527 5528 got := []string{} 5529 for _, e := range related { 5530 got = append(got, e.ID) 5531 } 5532 require.ElementsMatch(t, tc.expected, got) 5533 
}) 5534 } 5535 5536 t.Run("blocking query", func(t *testing.T) { 5537 ws := memdb.NewWatchSet() 5538 _, err := state.EvalsRelatedToID(ws, e2.ID) 5539 require.NoError(t, err) 5540 5541 // Update an eval off the chain and make sure watchset doesn't fire. 5542 e7.Status = structs.EvalStatusComplete 5543 state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{e7}) 5544 require.False(t, watchFired(ws)) 5545 5546 // Update an eval in the chain and make sure watchset does fire. 5547 e3.Status = structs.EvalStatusComplete 5548 state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{e3}) 5549 require.True(t, watchFired(ws)) 5550 }) 5551 } 5552 5553 func TestStateStore_UpdateAllocsFromClient(t *testing.T) { 5554 ci.Parallel(t) 5555 5556 state := testStateStore(t) 5557 5558 node := mock.Node() 5559 must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 997, node)) 5560 5561 parent := mock.Job() 5562 must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent)) 5563 5564 child := mock.Job() 5565 child.Status = "" 5566 child.ParentID = parent.ID 5567 must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child)) 5568 5569 alloc := mock.Alloc() 5570 alloc.NodeID = node.ID 5571 alloc.JobID = child.ID 5572 alloc.Job = child 5573 must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) 5574 5575 ws := memdb.NewWatchSet() 5576 summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) 5577 must.NoError(t, err) 5578 must.NotNil(t, summary) 5579 must.Eq(t, parent.ID, summary.JobID) 5580 must.NotNil(t, summary.Children) 5581 must.Eq(t, 0, summary.Children.Pending) 5582 must.Eq(t, 1, summary.Children.Running) 5583 must.Eq(t, 0, summary.Children.Dead) 5584 5585 // Create watchsets so we can test that update fires the watch 5586 ws = memdb.NewWatchSet() 5587 _, err = state.JobSummaryByID(ws, parent.Namespace, parent.ID) 5588 must.NoError(t, err) 5589 5590 // Create the delta 
updates 5591 ts := map[string]*structs.TaskState{"web": {State: structs.TaskStateRunning}} 5592 update := &structs.Allocation{ 5593 ID: alloc.ID, 5594 NodeID: alloc.NodeID, 5595 ClientStatus: structs.AllocClientStatusComplete, 5596 TaskStates: ts, 5597 JobID: alloc.JobID, 5598 TaskGroup: alloc.TaskGroup, 5599 } 5600 err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update}) 5601 must.NoError(t, err) 5602 5603 must.True(t, watchFired(ws)) 5604 5605 ws = memdb.NewWatchSet() 5606 summary, err = state.JobSummaryByID(ws, parent.Namespace, parent.ID) 5607 must.NoError(t, err) 5608 must.NotNil(t, summary) 5609 must.Eq(t, parent.ID, summary.JobID) 5610 must.NotNil(t, summary.Children) 5611 must.Eq(t, 0, summary.Children.Pending) 5612 must.Eq(t, 0, summary.Children.Running) 5613 must.Eq(t, 1, summary.Children.Dead) 5614 5615 must.False(t, watchFired(ws)) 5616 } 5617 5618 func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { 5619 ci.Parallel(t) 5620 5621 state := testStateStore(t) 5622 5623 node := mock.Node() 5624 5625 alloc1 := mock.Alloc() 5626 alloc1.NodeID = node.ID 5627 5628 alloc2 := mock.Alloc() 5629 alloc2.NodeID = node.ID 5630 5631 must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) 5632 must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc1.Job)) 5633 must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc2.Job)) 5634 must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) 5635 5636 // Create watchsets so we can test that update fires the watch 5637 watches := make([]memdb.WatchSet, 8) 5638 for i := 0; i < 8; i++ { 5639 watches[i] = memdb.NewWatchSet() 5640 } 5641 _, err := state.AllocByID(watches[0], alloc1.ID) 5642 must.NoError(t, err) 5643 _, err = state.AllocByID(watches[1], alloc2.ID) 5644 must.NoError(t, err) 5645 5646 _, err = state.AllocsByEval(watches[2], alloc1.EvalID) 5647 must.NoError(t, err) 
5648 _, err = state.AllocsByEval(watches[3], alloc2.EvalID) 5649 must.NoError(t, err) 5650 5651 _, err = state.AllocsByJob(watches[4], alloc1.Namespace, alloc1.JobID, false) 5652 must.NoError(t, err) 5653 _, err = state.AllocsByJob(watches[5], alloc2.Namespace, alloc2.JobID, false) 5654 must.NoError(t, err) 5655 5656 _, err = state.AllocsByNode(watches[6], alloc1.NodeID) 5657 must.NoError(t, err) 5658 _, err = state.AllocsByNode(watches[7], alloc2.NodeID) 5659 must.NoError(t, err) 5660 5661 // Create the delta updates 5662 ts := map[string]*structs.TaskState{"web": {State: structs.TaskStatePending}} 5663 update := &structs.Allocation{ 5664 ID: alloc1.ID, 5665 NodeID: alloc1.NodeID, 5666 ClientStatus: structs.AllocClientStatusFailed, 5667 TaskStates: ts, 5668 JobID: alloc1.JobID, 5669 TaskGroup: alloc1.TaskGroup, 5670 } 5671 update2 := &structs.Allocation{ 5672 ID: alloc2.ID, 5673 NodeID: alloc2.NodeID, 5674 ClientStatus: structs.AllocClientStatusRunning, 5675 TaskStates: ts, 5676 JobID: alloc2.JobID, 5677 TaskGroup: alloc2.TaskGroup, 5678 } 5679 5680 err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update, update2}) 5681 must.NoError(t, err) 5682 5683 for _, ws := range watches { 5684 must.True(t, watchFired(ws)) 5685 } 5686 5687 ws := memdb.NewWatchSet() 5688 out, err := state.AllocByID(ws, alloc1.ID) 5689 must.NoError(t, err) 5690 5691 alloc1.CreateIndex = 1000 5692 alloc1.ModifyIndex = 1001 5693 alloc1.TaskStates = ts 5694 alloc1.ClientStatus = structs.AllocClientStatusFailed 5695 must.Eq(t, alloc1, out) 5696 5697 out, err = state.AllocByID(ws, alloc2.ID) 5698 must.NoError(t, err) 5699 5700 alloc2.ModifyIndex = 1000 5701 alloc2.ModifyIndex = 1001 5702 alloc2.ClientStatus = structs.AllocClientStatusRunning 5703 alloc2.TaskStates = ts 5704 must.Eq(t, alloc2, out) 5705 5706 index, err := state.Index("allocs") 5707 must.NoError(t, err) 5708 must.Eq(t, 1001, index) 5709 5710 // Ensure summaries have been updated 5711 summary, 
err := state.JobSummaryByID(ws, alloc1.Namespace, alloc1.JobID) 5712 must.NoError(t, err) 5713 5714 tgSummary := summary.Summary["web"] 5715 must.Eq(t, 1, tgSummary.Failed) 5716 5717 summary2, err := state.JobSummaryByID(ws, alloc2.Namespace, alloc2.JobID) 5718 must.NoError(t, err) 5719 5720 tgSummary2 := summary2.Summary["web"] 5721 must.Eq(t, 1, tgSummary2.Running) 5722 5723 must.False(t, watchFired(ws)) 5724 } 5725 5726 func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { 5727 ci.Parallel(t) 5728 5729 state := testStateStore(t) 5730 5731 node := mock.Node() 5732 5733 alloc := mock.Alloc() 5734 alloc.NodeID = node.ID 5735 5736 must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) 5737 must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) 5738 must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) 5739 5740 // Create the delta updates 5741 ts := map[string]*structs.TaskState{"web": {State: structs.TaskStatePending}} 5742 update := &structs.Allocation{ 5743 ID: alloc.ID, 5744 NodeID: alloc.NodeID, 5745 ClientStatus: structs.AllocClientStatusRunning, 5746 TaskStates: ts, 5747 JobID: alloc.JobID, 5748 TaskGroup: alloc.TaskGroup, 5749 } 5750 update2 := &structs.Allocation{ 5751 ID: alloc.ID, 5752 NodeID: alloc.NodeID, 5753 ClientStatus: structs.AllocClientStatusPending, 5754 TaskStates: ts, 5755 JobID: alloc.JobID, 5756 TaskGroup: alloc.TaskGroup, 5757 } 5758 5759 err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update, update2}) 5760 must.NoError(t, err) 5761 5762 ws := memdb.NewWatchSet() 5763 out, err := state.AllocByID(ws, alloc.ID) 5764 must.NoError(t, err) 5765 5766 alloc.CreateIndex = 1000 5767 alloc.ModifyIndex = 1001 5768 alloc.TaskStates = ts 5769 alloc.ClientStatus = structs.AllocClientStatusPending 5770 must.Eq(t, alloc, out) 5771 5772 summary, err := state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID) 5773 
expectedSummary := &structs.JobSummary{ 5774 JobID: alloc.JobID, 5775 Namespace: alloc.Namespace, 5776 Summary: map[string]structs.TaskGroupSummary{ 5777 "web": { 5778 Starting: 1, 5779 }, 5780 }, 5781 Children: new(structs.JobChildrenSummary), 5782 CreateIndex: 999, 5783 ModifyIndex: 1001, 5784 } 5785 must.NoError(t, err) 5786 must.Eq(t, summary, expectedSummary) 5787 } 5788 5789 func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { 5790 ci.Parallel(t) 5791 5792 state := testStateStore(t) 5793 5794 node := mock.Node() 5795 5796 alloc := mock.Alloc() 5797 now := time.Now() 5798 alloc.NodeID = node.ID 5799 alloc.CreateTime = now.UnixNano() 5800 5801 pdeadline := 5 * time.Minute 5802 deployment := mock.Deployment() 5803 deployment.TaskGroups[alloc.TaskGroup].ProgressDeadline = pdeadline 5804 alloc.DeploymentID = deployment.ID 5805 5806 must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) 5807 must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) 5808 must.NoError(t, state.UpsertDeployment(1000, deployment)) 5809 must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) 5810 5811 healthy := now.Add(time.Second) 5812 update := &structs.Allocation{ 5813 ID: alloc.ID, 5814 NodeID: alloc.NodeID, 5815 ClientStatus: structs.AllocClientStatusRunning, 5816 JobID: alloc.JobID, 5817 TaskGroup: alloc.TaskGroup, 5818 DeploymentStatus: &structs.AllocDeploymentStatus{ 5819 Healthy: pointer.Of(true), 5820 Timestamp: healthy, 5821 }, 5822 } 5823 must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update})) 5824 5825 // Check that the deployment state was updated because the healthy 5826 // deployment 5827 dout, err := state.DeploymentByID(nil, deployment.ID) 5828 must.NoError(t, err) 5829 must.NotNil(t, dout) 5830 must.MapLen(t, 1, dout.TaskGroups) 5831 dstate := dout.TaskGroups[alloc.TaskGroup] 5832 must.NotNil(t, dstate) 5833 must.Eq(t, 
1, dstate.PlacedAllocs) 5834 must.True(t, healthy.Add(pdeadline).Equal(dstate.RequireProgressBy)) 5835 } 5836 5837 // This tests that the deployment state is merged correctly 5838 func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { 5839 ci.Parallel(t) 5840 5841 state := testStateStore(t) 5842 5843 node := mock.Node() 5844 5845 alloc := mock.Alloc() 5846 now := time.Now() 5847 alloc.NodeID = node.ID 5848 alloc.CreateTime = now.UnixNano() 5849 5850 pdeadline := 5 * time.Minute 5851 deployment := mock.Deployment() 5852 deployment.TaskGroups[alloc.TaskGroup].ProgressDeadline = pdeadline 5853 alloc.DeploymentID = deployment.ID 5854 alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ 5855 Canary: true, 5856 } 5857 5858 must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 998, node)) 5859 must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) 5860 must.NoError(t, state.UpsertDeployment(1000, deployment)) 5861 must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})) 5862 5863 update := &structs.Allocation{ 5864 ID: alloc.ID, 5865 NodeID: alloc.NodeID, 5866 ClientStatus: structs.AllocClientStatusRunning, 5867 JobID: alloc.JobID, 5868 TaskGroup: alloc.TaskGroup, 5869 DeploymentStatus: &structs.AllocDeploymentStatus{ 5870 Healthy: pointer.Of(true), 5871 Canary: false, 5872 }, 5873 } 5874 must.NoError(t, state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{update})) 5875 5876 // Check that the merging of the deployment status was correct 5877 out, err := state.AllocByID(nil, alloc.ID) 5878 must.NoError(t, err) 5879 must.NotNil(t, out) 5880 must.True(t, out.DeploymentStatus.Canary) 5881 must.NotNil(t, out.DeploymentStatus.Healthy) 5882 must.True(t, *out.DeploymentStatus.Healthy) 5883 } 5884 5885 // TestStateStore_UpdateAllocsFromClient_UpdateNodes verifies that the relevant 5886 // node data is updated when clients update their allocs. 
func TestStateStore_UpdateAllocsFromClient_UpdateNodes(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Three nodes, each with one allocation; only the first two will
	// receive client updates below.
	node1 := mock.Node()
	alloc1 := mock.Alloc()
	alloc1.NodeID = node1.ID

	node2 := mock.Node()
	alloc2 := mock.Alloc()
	alloc2.NodeID = node2.ID

	node3 := mock.Node()
	alloc3 := mock.Alloc()
	alloc3.NodeID = node3.ID

	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1))
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2))
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3))
	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, nil, alloc1.Job))
	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, nil, alloc2.Job))
	must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{alloc1, alloc2, alloc3}))

	// Create watches to make sure they fire when nodes are updated.
	ws1 := memdb.NewWatchSet()
	_, err := state.NodeByID(ws1, node1.ID)
	must.NoError(t, err)

	ws2 := memdb.NewWatchSet()
	_, err = state.NodeByID(ws2, node2.ID)
	must.NoError(t, err)

	// NOTE(review): ws3 is registered but never checked with watchFired
	// below — possibly an oversight, or kept only for symmetry; confirm.
	ws3 := memdb.NewWatchSet()
	_, err = state.NodeByID(ws3, node3.ID)
	must.NoError(t, err)

	// Create and apply alloc updates.
	// Don't update alloc 3.
	updateAlloc1 := &structs.Allocation{
		ID:           alloc1.ID,
		NodeID:       alloc1.NodeID,
		ClientStatus: structs.AllocClientStatusRunning,
		JobID:        alloc1.JobID,
		TaskGroup:    alloc1.TaskGroup,
	}
	updateAlloc2 := &structs.Allocation{
		ID:           alloc2.ID,
		NodeID:       alloc2.NodeID,
		ClientStatus: structs.AllocClientStatusRunning,
		JobID:        alloc2.JobID,
		TaskGroup:    alloc2.TaskGroup,
	}
	// An update for an alloc/node that doesn't exist must not error and
	// must not touch any real node.
	updateAllocNonExisting := &structs.Allocation{
		ID:           uuid.Generate(),
		NodeID:       uuid.Generate(),
		ClientStatus: structs.AllocClientStatusRunning,
		JobID:        uuid.Generate(),
		TaskGroup:    "group",
	}

	err = state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1005, []*structs.Allocation{
		updateAlloc1, updateAlloc2, updateAllocNonExisting,
	})
	must.NoError(t, err)

	// Check that node update watches fired.
	must.True(t, watchFired(ws1))
	must.True(t, watchFired(ws2))

	// Check that node LastAllocUpdateIndex were updated.
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node1.ID)
	must.NoError(t, err)
	must.NotNil(t, out)
	must.Eq(t, 1005, out.LastAllocUpdateIndex)
	must.False(t, watchFired(ws))

	out, err = state.NodeByID(ws, node2.ID)
	must.NoError(t, err)
	must.NotNil(t, out)
	must.Eq(t, 1005, out.LastAllocUpdateIndex)
	must.False(t, watchFired(ws))

	// Node 3 should not be updated.
	out, err = state.NodeByID(ws, node3.ID)
	must.NoError(t, err)
	must.NotNil(t, out)
	must.Eq(t, 0, out.LastAllocUpdateIndex)
	must.False(t, watchFired(ws))
}

// TestStateStore_UpsertAlloc_Alloc verifies that upserting a new allocation
// fires the ID/eval/job/node watches, stores the alloc verbatim, bumps the
// allocs table index, and creates a job summary with the task group counted
// as Starting.
func TestStateStore_UpsertAlloc_Alloc(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	alloc := mock.Alloc()

	if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create watchsets so we can test that update fires the watch
	watches := make([]memdb.WatchSet, 4)
	for i := 0; i < 4; i++ {
		watches[i] = memdb.NewWatchSet()
	}
	if _, err := state.AllocByID(watches[0], alloc.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByEval(watches[1], alloc.EvalID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByJob(watches[2], alloc.Namespace, alloc.JobID, false); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByNode(watches[3], alloc.NodeID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// All four watches must have fired on the upsert.
	for i, ws := range watches {
		if !watchFired(ws) {
			t.Fatalf("bad %d", i)
		}
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(alloc, out) {
		t.Fatalf("bad: %#v %#v", alloc, out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	summary, err := state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	tgSummary, ok := summary.Summary["web"]
	if !ok {
		t.Fatalf("no summary for task group web")
	}
	// NOTE(review): the failure message below says "queued" but the field
	// asserted is Starting.
	if tgSummary.Starting != 1 {
		t.Fatalf("expected queued: %v, actual: %v", 1, tgSummary.Starting)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpsertAlloc_Deployment verifies that upserting an alloc
// attached to a deployment fires the deployment watch and updates the
// deployment's task-group state (PlacedAllocs and the progress deadline).
func TestStateStore_UpsertAlloc_Deployment(t *testing.T) {
	ci.Parallel(t)
	require := require.New(t)

	state := testStateStore(t)
	alloc := mock.Alloc()
	now := time.Now()
	alloc.CreateTime = now.UnixNano()
	alloc.ModifyTime = now.UnixNano()
	pdeadline := 5 * time.Minute
	deployment := mock.Deployment()
	deployment.TaskGroups[alloc.TaskGroup].ProgressDeadline = pdeadline
	alloc.DeploymentID = deployment.ID

	require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job))
	require.Nil(state.UpsertDeployment(1000, deployment))

	// Create a watch set so we can test that update fires the watch
	ws := memdb.NewWatchSet()
	require.Nil(state.AllocsByDeployment(ws, alloc.DeploymentID))

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
	require.Nil(err)

	if !watchFired(ws) {
		t.Fatalf("watch not fired")
	}

	ws = memdb.NewWatchSet()
	allocs, err := state.AllocsByDeployment(ws, alloc.DeploymentID)
	require.Nil(err)
	require.Len(allocs, 1)
	require.EqualValues(alloc, allocs[0])

	index, err := state.Index("allocs")
	require.Nil(err)
	require.EqualValues(1001, index)
	if watchFired(ws) {
		t.Fatalf("bad")
	}

	// Check that the deployment state was updated
	dout, err := state.DeploymentByID(nil, deployment.ID)
	require.Nil(err)
	require.NotNil(dout)
	require.Len(dout.TaskGroups, 1)
	dstate := dout.TaskGroups[alloc.TaskGroup]
	require.NotNil(dstate)
	require.Equal(1, dstate.PlacedAllocs)
	// The progress deadline is anchored at the alloc's create time.
	require.True(now.Add(pdeadline).Equal(dstate.RequireProgressBy))
}

// TestStateStore_UpsertAlloc_AllocsByNamespace verifies that allocations in
// two different namespaces are returned by the correct per-namespace iterator
// and that the namespace watches fire on upsert.
func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Two allocs in each of two namespaces.
	ns1 := mock.Namespace()
	ns1.Name = "namespaced"
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc1.Namespace = ns1.Name
	alloc1.Job.Namespace = ns1.Name
	alloc2.Namespace = ns1.Name
	alloc2.Job.Namespace = ns1.Name

	ns2 := mock.Namespace()
	ns2.Name = "new-namespace"
	alloc3 := mock.Alloc()
	alloc4 := mock.Alloc()
	alloc3.Namespace = ns2.Name
	alloc3.Job.Namespace = ns2.Name
	alloc4.Namespace = ns2.Name
	alloc4.Job.Namespace = ns2.Name

	require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2}))
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc1.Job))
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, alloc3.Job))

	// Create watchsets so we can test that update fires the watch
	watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()}
	_, err := state.AllocsByNamespace(watches[0], ns1.Name)
	require.NoError(t, err)
	_, err = state.AllocsByNamespace(watches[1], ns2.Name)
	require.NoError(t, err)

	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4}))
	require.True(t, watchFired(watches[0]))
	require.True(t, watchFired(watches[1]))

	ws := memdb.NewWatchSet()
	iter1, err := state.AllocsByNamespace(ws, ns1.Name)
	require.NoError(t, err)
	iter2, err := state.AllocsByNamespace(ws, ns2.Name)
	require.NoError(t, err)

	// Drain both iterators.
	var out1 []*structs.Allocation
	for {
		raw := iter1.Next()
		if raw == nil {
			break
		}
		out1 = append(out1, raw.(*structs.Allocation))
	}

	var out2 []*structs.Allocation
	for {
		raw := iter2.Next()
		if raw == nil {
			break
		}
		out2 = append(out2, raw.(*structs.Allocation))
	}

	// Each namespace sees exactly its own two allocs.
	require.Len(t, out1, 2)
	require.Len(t, out2, 2)

	for _, alloc := range out1 {
		require.Equal(t, ns1.Name, alloc.Namespace)
	}
	for _, alloc := range out2 {
		require.Equal(t, ns2.Name, alloc.Namespace)
	}

	index, err := state.Index("allocs")
	require.NoError(t, err)
	require.EqualValues(t, 1001, index)
	require.False(t, watchFired(ws))
}

// Testing to ensure we keep issue
// https://github.com/hernad/nomad/issues/2583 fixed
func TestStateStore_UpsertAlloc_No_Job(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	alloc := mock.Alloc()
	alloc.Job = nil

	// Upserting an alloc with no job must be rejected.
	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 999, []*structs.Allocation{alloc})
	if err == nil || !strings.Contains(err.Error(), "without a job") {
		t.Fatalf("expect err: %v", err)
	}
}

// TestStateStore_UpsertAlloc_ChildJob verifies that upserting an alloc for a
// dispatched/child job updates the parent job's children summary counts.
func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	parent := mock.Job()
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 998, nil, parent))

	child := mock.Job()
	child.Status = ""
	child.ParentID = parent.ID

	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, child))

	alloc := mock.Alloc()
	alloc.JobID = child.ID
	alloc.Job = child

	// Create watchsets so we can test that delete fires the watch
	ws := memdb.NewWatchSet()
	_, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID)
	require.NoError(t, err)

	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})
	require.NoError(t, err)

	require.True(t, watchFired(ws))

	ws = memdb.NewWatchSet()
	summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID)
	require.NoError(t, err)
	require.NotNil(t, summary)

	require.Equal(t, parent.ID, summary.JobID)
	require.NotNil(t, summary.Children)

	// The child moved to running, so the parent counts one running child.
	require.Equal(t, int64(0), summary.Children.Pending)
	require.Equal(t, int64(1), summary.Children.Running)
	require.Equal(t, int64(0), summary.Children.Dead)

	require.False(t, watchFired(ws))
}

// TestStateStore_UpdateAlloc_Alloc verifies that re-upserting an existing
// alloc fires the watches, preserves CreateIndex while bumping ModifyIndex,
// and leaves the job summary unchanged.
func TestStateStore_UpdateAlloc_Alloc(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	alloc := mock.Alloc()

	if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil {
		t.Fatalf("err: %v", err)
	}

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	summary, err := state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	tgSummary := summary.Summary["web"]
	if tgSummary.Starting != 1 {
		t.Fatalf("expected starting: %v, actual: %v", 1, tgSummary.Starting)
	}

	// Same alloc ID, different node — an update, not a new placement.
	alloc2 := mock.Alloc()
	alloc2.ID = alloc.ID
	alloc2.NodeID = alloc.NodeID + ".new"
	state.UpsertJobSummary(1001, mock.JobSummary(alloc2.JobID))

	// Create watchsets so we can test that update fires the watch
	watches := make([]memdb.WatchSet, 4)
	for i := 0; i < 4; i++ {
		watches[i] = memdb.NewWatchSet()
	}
	if _, err := state.AllocByID(watches[0], alloc2.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByEval(watches[1], alloc2.EvalID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByJob(watches[2], alloc2.Namespace, alloc2.JobID, false); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByNode(watches[3], alloc2.NodeID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	for i, ws := range watches {
		if !watchFired(ws) {
			t.Fatalf("bad %d", i)
		}
	}

	ws = memdb.NewWatchSet()
	out, err := state.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(alloc2, out) {
		t.Fatalf("bad: %#v %#v", alloc2, out)
	}

	// The update must keep the original create index and record the new
	// modify index.
	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1002 {
		t.Fatalf("bad: %#v", out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1002 {
		t.Fatalf("bad: %d", index)
	}

	// Ensure that summary hasn't changed
	summary, err = state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	tgSummary = summary.Summary["web"]
	if tgSummary.Starting != 1 {
		t.Fatalf("expected starting: %v, actual: %v", 1, tgSummary.Starting)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// This test ensures that the state store will mark the client's status as lost
// when set rather than preferring the existing status.
6343 func TestStateStore_UpdateAlloc_Lost(t *testing.T) { 6344 ci.Parallel(t) 6345 6346 state := testStateStore(t) 6347 alloc := mock.Alloc() 6348 alloc.ClientStatus = "foo" 6349 6350 if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { 6351 t.Fatalf("err: %v", err) 6352 } 6353 6354 err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) 6355 if err != nil { 6356 t.Fatalf("err: %v", err) 6357 } 6358 6359 alloc2 := new(structs.Allocation) 6360 *alloc2 = *alloc 6361 alloc2.ClientStatus = structs.AllocClientStatusLost 6362 if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}); err != nil { 6363 t.Fatalf("err: %v", err) 6364 } 6365 6366 ws := memdb.NewWatchSet() 6367 out, err := state.AllocByID(ws, alloc2.ID) 6368 if err != nil { 6369 t.Fatalf("err: %v", err) 6370 } 6371 6372 if out.ClientStatus != structs.AllocClientStatusLost { 6373 t.Fatalf("bad: %#v", out) 6374 } 6375 } 6376 6377 // This test ensures an allocation can be updated when there is no job 6378 // associated with it. 
This will happen when a job is stopped by an user which 6379 // has non-terminal allocations on clients 6380 func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { 6381 ci.Parallel(t) 6382 6383 state := testStateStore(t) 6384 alloc := mock.Alloc() 6385 6386 // Upsert a job 6387 state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) 6388 if err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job); err != nil { 6389 t.Fatalf("err: %v", err) 6390 } 6391 6392 err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}) 6393 if err != nil { 6394 t.Fatalf("err: %v", err) 6395 } 6396 6397 if err := state.DeleteJob(1001, alloc.Namespace, alloc.JobID); err != nil { 6398 t.Fatalf("err: %v", err) 6399 } 6400 6401 // Update the desired state of the allocation to stop 6402 allocCopy := alloc.Copy() 6403 allocCopy.DesiredStatus = structs.AllocDesiredStatusStop 6404 if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{allocCopy}); err != nil { 6405 t.Fatalf("err: %v", err) 6406 } 6407 6408 // Update the client state of the allocation to complete 6409 allocCopy1 := allocCopy.Copy() 6410 allocCopy1.ClientStatus = structs.AllocClientStatusComplete 6411 if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{allocCopy1}); err != nil { 6412 t.Fatalf("err: %v", err) 6413 } 6414 6415 ws := memdb.NewWatchSet() 6416 out, _ := state.AllocByID(ws, alloc.ID) 6417 // Update the modify index of the alloc before comparing 6418 allocCopy1.ModifyIndex = 1003 6419 if !reflect.DeepEqual(out, allocCopy1) { 6420 t.Fatalf("expected: %#v \n actual: %#v", allocCopy1, out) 6421 } 6422 } 6423 6424 func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { 6425 ci.Parallel(t) 6426 require := require.New(t) 6427 6428 state := testStateStore(t) 6429 alloc := mock.Alloc() 6430 6431 require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job)) 6432 
require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) 6433 6434 t1 := &structs.DesiredTransition{ 6435 Migrate: pointer.Of(true), 6436 } 6437 t2 := &structs.DesiredTransition{ 6438 Migrate: pointer.Of(false), 6439 } 6440 eval := &structs.Evaluation{ 6441 ID: uuid.Generate(), 6442 Namespace: alloc.Namespace, 6443 Priority: alloc.Job.Priority, 6444 Type: alloc.Job.Type, 6445 TriggeredBy: structs.EvalTriggerNodeDrain, 6446 JobID: alloc.Job.ID, 6447 JobModifyIndex: alloc.Job.ModifyIndex, 6448 Status: structs.EvalStatusPending, 6449 } 6450 evals := []*structs.Evaluation{eval} 6451 6452 m := map[string]*structs.DesiredTransition{alloc.ID: t1} 6453 require.Nil(state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1001, m, evals)) 6454 6455 ws := memdb.NewWatchSet() 6456 out, err := state.AllocByID(ws, alloc.ID) 6457 require.Nil(err) 6458 require.NotNil(out.DesiredTransition.Migrate) 6459 require.True(*out.DesiredTransition.Migrate) 6460 require.EqualValues(1000, out.CreateIndex) 6461 require.EqualValues(1001, out.ModifyIndex) 6462 6463 index, err := state.Index("allocs") 6464 require.Nil(err) 6465 require.EqualValues(1001, index) 6466 6467 // Check the eval is created 6468 eout, err := state.EvalByID(nil, eval.ID) 6469 require.Nil(err) 6470 require.NotNil(eout) 6471 6472 m = map[string]*structs.DesiredTransition{alloc.ID: t2} 6473 require.Nil(state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1002, m, evals)) 6474 6475 ws = memdb.NewWatchSet() 6476 out, err = state.AllocByID(ws, alloc.ID) 6477 require.Nil(err) 6478 require.NotNil(out.DesiredTransition.Migrate) 6479 require.False(*out.DesiredTransition.Migrate) 6480 require.EqualValues(1000, out.CreateIndex) 6481 require.EqualValues(1002, out.ModifyIndex) 6482 6483 index, err = state.Index("allocs") 6484 require.Nil(err) 6485 require.EqualValues(1002, index) 6486 6487 // Try with a bogus alloc id 6488 m = map[string]*structs.DesiredTransition{uuid.Generate(): t2} 
6489 require.Nil(state.UpdateAllocsDesiredTransitions(structs.MsgTypeTestSetup, 1003, m, evals)) 6490 } 6491 6492 func TestStateStore_JobSummary(t *testing.T) { 6493 ci.Parallel(t) 6494 6495 state := testStateStore(t) 6496 6497 // Add a job 6498 job := mock.Job() 6499 state.UpsertJob(structs.MsgTypeTestSetup, 900, nil, job) 6500 6501 // Get the job back 6502 ws := memdb.NewWatchSet() 6503 outJob, _ := state.JobByID(ws, job.Namespace, job.ID) 6504 if outJob.CreateIndex != 900 { 6505 t.Fatalf("bad create index: %v", outJob.CreateIndex) 6506 } 6507 summary, _ := state.JobSummaryByID(ws, job.Namespace, job.ID) 6508 if summary.CreateIndex != 900 { 6509 t.Fatalf("bad create index: %v", summary.CreateIndex) 6510 } 6511 6512 // Upsert an allocation 6513 alloc := mock.Alloc() 6514 alloc.JobID = job.ID 6515 alloc.Job = job 6516 state.UpsertAllocs(structs.MsgTypeTestSetup, 910, []*structs.Allocation{alloc}) 6517 6518 // Update the alloc from client 6519 alloc1 := alloc.Copy() 6520 alloc1.ClientStatus = structs.AllocClientStatusPending 6521 alloc1.DesiredStatus = "" 6522 state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 920, []*structs.Allocation{alloc}) 6523 6524 alloc3 := alloc.Copy() 6525 alloc3.ClientStatus = structs.AllocClientStatusRunning 6526 alloc3.DesiredStatus = "" 6527 state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 930, []*structs.Allocation{alloc3}) 6528 6529 // Upsert the alloc 6530 alloc4 := alloc.Copy() 6531 alloc4.ClientStatus = structs.AllocClientStatusPending 6532 alloc4.DesiredStatus = structs.AllocDesiredStatusRun 6533 state.UpsertAllocs(structs.MsgTypeTestSetup, 950, []*structs.Allocation{alloc4}) 6534 6535 // Again upsert the alloc 6536 alloc5 := alloc.Copy() 6537 alloc5.ClientStatus = structs.AllocClientStatusPending 6538 alloc5.DesiredStatus = structs.AllocDesiredStatusRun 6539 state.UpsertAllocs(structs.MsgTypeTestSetup, 970, []*structs.Allocation{alloc5}) 6540 6541 if !watchFired(ws) { 6542 t.Fatalf("bad") 6543 } 6544 6545 
expectedSummary := structs.JobSummary{ 6546 JobID: job.ID, 6547 Namespace: job.Namespace, 6548 Summary: map[string]structs.TaskGroupSummary{ 6549 "web": { 6550 Running: 1, 6551 }, 6552 }, 6553 Children: new(structs.JobChildrenSummary), 6554 CreateIndex: 900, 6555 ModifyIndex: 930, 6556 } 6557 6558 summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) 6559 if !reflect.DeepEqual(&expectedSummary, summary) { 6560 t.Fatalf("expected: %#v, actual: %v", expectedSummary, summary) 6561 } 6562 6563 // De-register the job. 6564 state.DeleteJob(980, job.Namespace, job.ID) 6565 6566 // Shouldn't have any effect on the summary 6567 alloc6 := alloc.Copy() 6568 alloc6.ClientStatus = structs.AllocClientStatusRunning 6569 alloc6.DesiredStatus = "" 6570 state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 990, []*structs.Allocation{alloc6}) 6571 6572 // We shouldn't have any summary at this point 6573 summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID) 6574 if summary != nil { 6575 t.Fatalf("expected nil, actual: %#v", summary) 6576 } 6577 6578 // Re-register the same job 6579 job1 := mock.Job() 6580 job1.ID = job.ID 6581 state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job1) 6582 outJob2, _ := state.JobByID(ws, job1.Namespace, job1.ID) 6583 if outJob2.CreateIndex != 1000 { 6584 t.Fatalf("bad create index: %v", outJob2.CreateIndex) 6585 } 6586 summary, _ = state.JobSummaryByID(ws, job1.Namespace, job1.ID) 6587 if summary.CreateIndex != 1000 { 6588 t.Fatalf("bad create index: %v", summary.CreateIndex) 6589 } 6590 6591 // Upsert an allocation 6592 alloc7 := alloc.Copy() 6593 alloc7.JobID = outJob.ID 6594 alloc7.Job = outJob 6595 alloc7.ClientStatus = structs.AllocClientStatusComplete 6596 alloc7.DesiredStatus = structs.AllocDesiredStatusRun 6597 state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1020, []*structs.Allocation{alloc7}) 6598 6599 expectedSummary = structs.JobSummary{ 6600 JobID: job.ID, 6601 Namespace: job.Namespace, 6602 Summary: 
map[string]structs.TaskGroupSummary{ 6603 "web": {}, 6604 }, 6605 Children: new(structs.JobChildrenSummary), 6606 CreateIndex: 1000, 6607 ModifyIndex: 1000, 6608 } 6609 6610 summary, _ = state.JobSummaryByID(ws, job1.Namespace, job1.ID) 6611 if !reflect.DeepEqual(&expectedSummary, summary) { 6612 t.Fatalf("expected: %#v, actual: %#v", expectedSummary, summary) 6613 } 6614 } 6615 6616 func TestStateStore_ReconcileJobSummary(t *testing.T) { 6617 ci.Parallel(t) 6618 6619 state := testStateStore(t) 6620 6621 // Create an alloc 6622 alloc := mock.Alloc() 6623 6624 // Add another task group to the job 6625 tg2 := alloc.Job.TaskGroups[0].Copy() 6626 tg2.Name = "db" 6627 alloc.Job.TaskGroups = append(alloc.Job.TaskGroups, tg2) 6628 state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, alloc.Job) 6629 6630 // Create one more alloc for the db task group 6631 alloc2 := mock.Alloc() 6632 alloc2.TaskGroup = "db" 6633 alloc2.JobID = alloc.JobID 6634 alloc2.Job = alloc.Job 6635 6636 // Upserts the alloc 6637 state.UpsertAllocs(structs.MsgTypeTestSetup, 110, []*structs.Allocation{alloc, alloc2}) 6638 6639 // Change the state of the first alloc to running 6640 alloc3 := alloc.Copy() 6641 alloc3.ClientStatus = structs.AllocClientStatusRunning 6642 state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 120, []*structs.Allocation{alloc3}) 6643 6644 //Add some more allocs to the second tg 6645 alloc4 := mock.Alloc() 6646 alloc4.JobID = alloc.JobID 6647 alloc4.Job = alloc.Job 6648 alloc4.TaskGroup = "db" 6649 alloc5 := alloc4.Copy() 6650 alloc5.ClientStatus = structs.AllocClientStatusRunning 6651 6652 alloc6 := mock.Alloc() 6653 alloc6.JobID = alloc.JobID 6654 alloc6.Job = alloc.Job 6655 alloc6.TaskGroup = "db" 6656 alloc7 := alloc6.Copy() 6657 alloc7.ClientStatus = structs.AllocClientStatusComplete 6658 6659 alloc8 := mock.Alloc() 6660 alloc8.JobID = alloc.JobID 6661 alloc8.Job = alloc.Job 6662 alloc8.TaskGroup = "db" 6663 alloc9 := alloc8.Copy() 6664 alloc9.ClientStatus = 
structs.AllocClientStatusFailed 6665 6666 alloc10 := mock.Alloc() 6667 alloc10.JobID = alloc.JobID 6668 alloc10.Job = alloc.Job 6669 alloc10.TaskGroup = "db" 6670 alloc11 := alloc10.Copy() 6671 alloc11.ClientStatus = structs.AllocClientStatusLost 6672 6673 alloc12 := mock.Alloc() 6674 alloc12.JobID = alloc.JobID 6675 alloc12.Job = alloc.Job 6676 alloc12.TaskGroup = "db" 6677 alloc12.ClientStatus = structs.AllocClientStatusUnknown 6678 6679 state.UpsertAllocs(structs.MsgTypeTestSetup, 130, []*structs.Allocation{alloc4, alloc6, alloc8, alloc10, alloc12}) 6680 6681 state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 150, []*structs.Allocation{alloc5, alloc7, alloc9, alloc11}) 6682 6683 // DeleteJobSummary is a helper method and doesn't modify the indexes table 6684 state.DeleteJobSummary(130, alloc.Namespace, alloc.Job.ID) 6685 6686 state.ReconcileJobSummaries(120) 6687 6688 ws := memdb.NewWatchSet() 6689 summary, _ := state.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID) 6690 expectedSummary := structs.JobSummary{ 6691 JobID: alloc.Job.ID, 6692 Namespace: alloc.Namespace, 6693 Summary: map[string]structs.TaskGroupSummary{ 6694 "web": { 6695 Running: 1, 6696 }, 6697 "db": { 6698 Starting: 1, 6699 Running: 1, 6700 Failed: 1, 6701 Complete: 1, 6702 Lost: 1, 6703 Unknown: 1, 6704 }, 6705 }, 6706 CreateIndex: 100, 6707 ModifyIndex: 120, 6708 } 6709 if !reflect.DeepEqual(&expectedSummary, summary) { 6710 t.Fatalf("expected: %v, actual: %v", expectedSummary, summary) 6711 } 6712 } 6713 6714 func TestStateStore_ReconcileParentJobSummary(t *testing.T) { 6715 ci.Parallel(t) 6716 require := require.New(t) 6717 6718 state := testStateStore(t) 6719 6720 // Add a node 6721 node := mock.Node() 6722 state.UpsertNode(structs.MsgTypeTestSetup, 80, node) 6723 6724 // Make a parameterized job 6725 job1 := mock.BatchJob() 6726 job1.ID = "test" 6727 job1.ParameterizedJob = &structs.ParameterizedJobConfig{ 6728 Payload: "random", 6729 } 6730 job1.TaskGroups[0].Count = 1 6731 
state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job1) 6732 6733 // Make a child job 6734 childJob := job1.Copy() 6735 childJob.ID = job1.ID + "dispatch-23423423" 6736 childJob.ParentID = job1.ID 6737 childJob.Dispatched = true 6738 childJob.Status = structs.JobStatusRunning 6739 6740 // Make some allocs for child job 6741 alloc := mock.Alloc() 6742 alloc.NodeID = node.ID 6743 alloc.Job = childJob 6744 alloc.JobID = childJob.ID 6745 alloc.ClientStatus = structs.AllocClientStatusRunning 6746 6747 alloc2 := mock.Alloc() 6748 alloc2.NodeID = node.ID 6749 alloc2.Job = childJob 6750 alloc2.JobID = childJob.ID 6751 alloc2.ClientStatus = structs.AllocClientStatusFailed 6752 6753 require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 110, nil, childJob)) 6754 require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 111, []*structs.Allocation{alloc, alloc2})) 6755 6756 // Make the summary incorrect in the state store 6757 summary, err := state.JobSummaryByID(nil, job1.Namespace, job1.ID) 6758 require.Nil(err) 6759 6760 summary.Children = nil 6761 summary.Summary = make(map[string]structs.TaskGroupSummary) 6762 summary.Summary["web"] = structs.TaskGroupSummary{ 6763 Queued: 1, 6764 } 6765 6766 // Delete the child job summary 6767 state.DeleteJobSummary(125, childJob.Namespace, childJob.ID) 6768 6769 state.ReconcileJobSummaries(120) 6770 6771 ws := memdb.NewWatchSet() 6772 6773 // Verify parent summary is corrected 6774 summary, _ = state.JobSummaryByID(ws, alloc.Namespace, job1.ID) 6775 expectedSummary := structs.JobSummary{ 6776 JobID: job1.ID, 6777 Namespace: job1.Namespace, 6778 Summary: make(map[string]structs.TaskGroupSummary), 6779 Children: &structs.JobChildrenSummary{ 6780 Running: 1, 6781 }, 6782 CreateIndex: 100, 6783 ModifyIndex: 120, 6784 } 6785 require.Equal(&expectedSummary, summary) 6786 6787 // Verify child job summary is also correct 6788 childSummary, _ := state.JobSummaryByID(ws, childJob.Namespace, childJob.ID) 6789 expectedChildSummary := 
structs.JobSummary{
		JobID:     childJob.ID,
		Namespace: childJob.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Running: 1,
				Failed:  1,
			},
		},
		CreateIndex: 110,
		ModifyIndex: 120,
	}
	require.Equal(&expectedChildSummary, childSummary)
}

// TestStateStore_UpdateAlloc_JobNotPresent verifies that client-side
// allocation updates are tolerated when the referenced job has been deleted,
// and that such updates do not pollute the summary of a job that is later
// re-registered under the same ID.
func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	alloc := mock.Alloc()
	state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, alloc.Job)
	state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc})

	// Delete the job
	state.DeleteJob(300, alloc.Namespace, alloc.Job.ID)

	// Update the alloc
	alloc1 := alloc.Copy()
	alloc1.ClientStatus = structs.AllocClientStatusRunning

	// Updating an allocation whose job is gone should not return an error.
	if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Re-Register the job
	state.UpsertJob(structs.MsgTypeTestSetup, 500, nil, alloc.Job)

	// Update the alloc again
	alloc2 := alloc.Copy()
	alloc2.ClientStatus = structs.AllocClientStatusComplete
	// NOTE(review): alloc2 is built here but the update below re-sends
	// alloc1 at the same index (400); this looks like it was meant to send
	// alloc2. Left unchanged because sending alloc2 would alter the summary
	// asserted below — confirm intent before changing.
	if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 400, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Job Summary of the newly registered job shouldn't account for the
	// allocation update for the older job
	expectedSummary := structs.JobSummary{
		JobID:     alloc1.JobID,
		Namespace: alloc1.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: 500,
		ModifyIndex: 500,
	}

	ws := memdb.NewWatchSet()
	summary, _ := state.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID)
	if !reflect.DeepEqual(&expectedSummary, summary) {
		t.Fatalf("expected: %v, actual: %v", expectedSummary, summary)
	}
}

// TestStateStore_EvictAlloc_Alloc checks that re-upserting an allocation
// with an evict desired status overwrites the stored copy and bumps the
// allocs table index.
func TestStateStore_EvictAlloc_Alloc(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	alloc := mock.Alloc()

	state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	alloc2 := new(structs.Allocation)
	*alloc2 = *alloc
	alloc2.DesiredStatus = structs.AllocDesiredStatusEvict
	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out.DesiredStatus != structs.AllocDesiredStatusEvict {
		t.Fatalf("bad: %#v %#v", alloc, out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}
}

// TestStateStore_AllocsByNode verifies listing allocations by node ID.
func TestStateStore_AllocsByNode(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var allocs []*structs.Allocation

	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.NodeID = "foo"
		allocs = append(allocs, alloc)
	}

	for idx, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+idx), mock.JobSummary(alloc.JobID))
	}

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocsByNode(ws, "foo")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	sort.Sort(AllocIDSort(allocs))
	sort.Sort(AllocIDSort(out))

	if 
!reflect.DeepEqual(allocs, out) {
		t.Fatalf("bad: %#v %#v", allocs, out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_AllocsByNodeTerminal verifies that allocations on a node
// can be filtered by whether their status is terminal.
func TestStateStore_AllocsByNodeTerminal(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var allocs, term, nonterm []*structs.Allocation

	// Alternate terminal (stopped) and non-terminal allocs on the node.
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.NodeID = "foo"
		if i%2 == 0 {
			alloc.DesiredStatus = structs.AllocDesiredStatusStop
			term = append(term, alloc)
		} else {
			nonterm = append(nonterm, alloc)
		}
		allocs = append(allocs, alloc)
	}

	for idx, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+idx), mock.JobSummary(alloc.JobID))
	}

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify the terminal allocs
	ws := memdb.NewWatchSet()
	out, err := state.AllocsByNodeTerminal(ws, "foo", true)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	sort.Sort(AllocIDSort(term))
	sort.Sort(AllocIDSort(out))

	if !reflect.DeepEqual(term, out) {
		t.Fatalf("bad: %#v %#v", term, out)
	}

	// Verify the non-terminal allocs
	out, err = state.AllocsByNodeTerminal(ws, "foo", false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	sort.Sort(AllocIDSort(nonterm))
	sort.Sort(AllocIDSort(out))

	if !reflect.DeepEqual(nonterm, out) {
		t.Fatalf("bad: %#v %#v", nonterm, out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_AllocsByJob verifies listing allocations by job ID.
func TestStateStore_AllocsByJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var allocs []*structs.Allocation

	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.JobID = "foo"
		allocs = append(allocs, alloc)
	}

	for i, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID))
	}

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocsByJob(ws, mock.Alloc().Namespace, "foo", false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	sort.Sort(AllocIDSort(allocs))
	sort.Sort(AllocIDSort(out))

	if !reflect.DeepEqual(allocs, out) {
		t.Fatalf("bad: %#v %#v", allocs, out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_AllocsForRegisteredJob verifies that AllocsByJob can either
// include or exclude allocations belonging to a previous (deleted) version
// of a job that was re-registered under the same ID.
func TestStateStore_AllocsForRegisteredJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var allocs []*structs.Allocation
	var allocs1 []*structs.Allocation

	job := mock.Job()
	job.ID = "foo"
	state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)
	for i := 0; i < 3; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		allocs = append(allocs, alloc)
	}
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, allocs); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Delete the first incarnation of the job; its allocs remain.
	if err := state.DeleteJob(250, job.Namespace, job.ID); err != nil {
		t.Fatalf("err: %v", err)
	}

	job1 := mock.Job()
	job1.ID = "foo"
	job1.CreateIndex = 50
	state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job1)
	for i := 0; i < 4; i++ {
		alloc := mock.Alloc()
		alloc.Job = job1
		alloc.JobID = job1.ID
		allocs1 = append(allocs1, alloc)
	}

	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs1); err != nil {
		t.Fatalf("err: %v", err)
	}

	// anyCreateIndex=true should return allocs of both incarnations.
	ws := memdb.NewWatchSet()
	out, err := state.AllocsByJob(ws, job1.Namespace, job1.ID, true)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	expected := len(allocs) + len(allocs1)
	if len(out) != expected {
		t.Fatalf("expected: %v, actual: %v", expected, len(out))
	}

	// anyCreateIndex=false should return only the current incarnation's.
	out1, err := state.AllocsByJob(ws, job1.Namespace, job1.ID, false)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	expected = len(allocs1)
	if len(out1) != expected {
		t.Fatalf("expected: %v, actual: %v", expected, len(out1))
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_AllocsByIDPrefix verifies prefix lookups of allocations in
// default and reverse order.
func TestStateStore_AllocsByIDPrefix(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var allocs []*structs.Allocation

	ids := []string{
		"aaaaaaaa-7bfb-395d-eb95-0685af2176b2",
		"aaaaaaab-7bfb-395d-eb95-0685af2176b2",
		"aaaaaabb-7bfb-395d-eb95-0685af2176b2",
		"aaaaabbb-7bfb-395d-eb95-0685af2176b2",
		"aaaabbbb-7bfb-395d-eb95-0685af2176b2",
		"aaabbbbb-7bfb-395d-eb95-0685af2176b2",
		"aabbbbbb-7bfb-395d-eb95-0685af2176b2",
		"abbbbbbb-7bfb-395d-eb95-0685af2176b2",
		"bbbbbbbb-7bfb-395d-eb95-0685af2176b2",
	}
	for i := 0; i < 9; i++ {
		alloc := mock.Alloc()
		alloc.ID = ids[i]
		allocs = append(allocs, alloc)
	}

	for i, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID))
	}

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs)
	require.NoError(t, err)

	// gatherAllocs drains a result iterator into a slice.
	gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation {
		var allocs []*structs.Allocation
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			allocs = append(allocs, raw.(*structs.Allocation))
		}
		return allocs
	}

	t.Run("allocs by prefix", func(t *testing.T) {
		ws := memdb.NewWatchSet()
		iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortDefault)
		require.NoError(t, err)

		out := gatherAllocs(iter)
		require.Len(t, out, 5, "expected five allocations")

		got := []string{}
		for _, a := range out {
			got = 
append(got, a.ID)
		}
		expected := []string{
			"aaaaaaaa-7bfb-395d-eb95-0685af2176b2",
			"aaaaaaab-7bfb-395d-eb95-0685af2176b2",
			"aaaaaabb-7bfb-395d-eb95-0685af2176b2",
			"aaaaabbb-7bfb-395d-eb95-0685af2176b2",
			"aaaabbbb-7bfb-395d-eb95-0685af2176b2",
		}
		require.Equal(t, expected, got)
		require.False(t, watchFired(ws))
	})

	t.Run("invalid prefix", func(t *testing.T) {
		ws := memdb.NewWatchSet()
		iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb", SortDefault)
		require.NoError(t, err)

		out := gatherAllocs(iter)
		require.Len(t, out, 0)
		require.False(t, watchFired(ws))
	})

	t.Run("reverse", func(t *testing.T) {
		ws := memdb.NewWatchSet()
		iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortReverse)
		require.NoError(t, err)

		out := gatherAllocs(iter)
		require.Len(t, out, 5, "expected five allocations")

		got := []string{}
		for _, a := range out {
			got = append(got, a.ID)
		}
		expected := []string{
			"aaaabbbb-7bfb-395d-eb95-0685af2176b2",
			"aaaaabbb-7bfb-395d-eb95-0685af2176b2",
			"aaaaaabb-7bfb-395d-eb95-0685af2176b2",
			"aaaaaaab-7bfb-395d-eb95-0685af2176b2",
			"aaaaaaaa-7bfb-395d-eb95-0685af2176b2",
		}
		require.Equal(t, expected, got)
		require.False(t, watchFired(ws))
	})
}

// TestStateStore_AllocsByIDPrefix_Namespaces verifies that prefix lookups
// are scoped to the requested namespace.
func TestStateStore_AllocsByIDPrefix_Namespaces(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	alloc1 := mock.Alloc()
	alloc1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2"
	alloc2 := mock.Alloc()
	alloc2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2"
	sharedPrefix := "aabb"

	ns1 := mock.Namespace()
	ns1.Name = "namespace1"
	ns2 := mock.Namespace()
	ns2.Name = "namespace2"

	alloc1.Namespace = ns1.Name
	alloc2.Namespace = ns2.Name

	require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2}))
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))

	// gatherAllocs drains a result iterator into a slice.
	gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation {
		var allocs []*structs.Allocation
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			alloc := raw.(*structs.Allocation)
			allocs = append(allocs, alloc)
		}
		return allocs
	}

	// The shared prefix must yield exactly one alloc per namespace.
	ws := memdb.NewWatchSet()
	iter1, err := state.AllocsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault)
	require.NoError(t, err)
	iter2, err := state.AllocsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault)
	require.NoError(t, err)

	allocsNs1 := gatherAllocs(iter1)
	allocsNs2 := gatherAllocs(iter2)
	require.Len(t, allocsNs1, 1)
	require.Len(t, allocsNs2, 1)

	iter1, err = state.AllocsByIDPrefix(ws, ns1.Name, alloc1.ID[:8], SortDefault)
	require.NoError(t, err)

	allocsNs1 = gatherAllocs(iter1)
	require.Len(t, allocsNs1, 1)
	require.False(t, watchFired(ws))
}

// TestStateStore_Allocs verifies iterating over all allocations.
func TestStateStore_Allocs(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var allocs []*structs.Allocation

	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		allocs = append(allocs, alloc)
	}
	for i, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID))
	}

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	iter, err := state.Allocs(ws, SortDefault)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out []*structs.Allocation
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*structs.Allocation))
	}

	sort.Sort(AllocIDSort(allocs))
	sort.Sort(AllocIDSort(out))

	if !reflect.DeepEqual(allocs, out) {
		t.Fatalf("bad: %#v %#v", allocs, out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_Allocs_PrevAlloc verifies that upserting an alloc with a
// PreviousAllocation back-fills NextAllocation on (and reindexes) the
// previous alloc.
func TestStateStore_Allocs_PrevAlloc(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	var allocs []*structs.Allocation

	require := require.New(t)
	for i := 0; i < 5; i++ {
		alloc := mock.Alloc()
		allocs = append(allocs, alloc)
	}
	for i, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID))
	}
	// Set some previous alloc ids
	allocs[1].PreviousAllocation = allocs[0].ID
	allocs[2].PreviousAllocation = allocs[1].ID

	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs)
	require.Nil(err)

	ws := memdb.NewWatchSet()
	iter, err := state.Allocs(ws, SortDefault)
	require.Nil(err)

	var out []*structs.Allocation
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*structs.Allocation))
	}

	// Set expected NextAllocation fields
	allocs[0].NextAllocation = allocs[1].ID
	allocs[1].NextAllocation = allocs[2].ID

	sort.Sort(AllocIDSort(allocs))
	sort.Sort(AllocIDSort(out))

	require.Equal(allocs, out)
	require.False(watchFired(ws))

	// Insert another alloc, verify index of previous alloc also got updated
	alloc := mock.Alloc()
	alloc.PreviousAllocation = allocs[0].ID
	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
	require.Nil(err)
	alloc0, err := state.AllocByID(nil, allocs[0].ID)
	require.Nil(err)
	require.Equal(alloc0.ModifyIndex, uint64(1001))
}

// TestStateStore_SetJobStatus_ForceStatus verifies that setJobStatus honors
// a forced status string.
func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) {
	ci.Parallel(t)

	index := uint64(0)
	state := testStateStore(t)
	txn := state.db.WriteTxn(index)

	// Create and insert a mock job.
job := mock.Job()
	job.Status = ""
	job.ModifyIndex = index
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	// Force an arbitrary status; setJobStatus must store it verbatim.
	exp := "foobar"
	index = uint64(1000)
	if err := state.setJobStatus(index, txn, job, false, exp); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	if updated.Status != exp {
		t.Fatalf("setJobStatus() set %v; expected %v", updated.Status, exp)
	}

	if updated.ModifyIndex != index {
		t.Fatalf("setJobStatus() set %d; expected %d", updated.ModifyIndex, index)
	}
}

// TestStateStore_SetJobStatus_NoOp verifies that setJobStatus does not
// rewrite a job whose status is already correct.
func TestStateStore_SetJobStatus_NoOp(t *testing.T) {
	ci.Parallel(t)

	index := uint64(0)
	state := testStateStore(t)
	txn := state.db.WriteTxn(index)

	// Create and insert a mock job that should be pending.
	job := mock.Job()
	job.Status = structs.JobStatusPending
	job.ModifyIndex = 10
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	index = uint64(1000)
	if err := state.setJobStatus(index, txn, job, false, ""); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	// ModifyIndex must NOT have advanced to the new index.
	if updated.ModifyIndex == index {
		t.Fatalf("setJobStatus() should have been a no-op")
	}
}

// TestStateStore_SetJobStatus verifies that setJobStatus corrects a job
// stored with a wrong status.
func TestStateStore_SetJobStatus(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	txn := state.db.WriteTxn(uint64(0))

	// Create and insert a mock job that should be pending but has an incorrect
	// status.
	job := mock.Job()
	job.Status = "foobar"
	job.ModifyIndex = 10
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	index := uint64(1000)
	if err := state.setJobStatus(index, txn, job, false, ""); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	if updated.Status != structs.JobStatusPending {
		t.Fatalf("setJobStatus() set %v; expected %v", updated.Status, structs.JobStatusPending)
	}

	if updated.ModifyIndex != index {
		t.Fatalf("setJobStatus() set %d; expected %d", updated.ModifyIndex, index)
	}
}

// TestStateStore_GetJobStatus_NoEvalsOrAllocs: a service job with no evals
// or allocs is pending.
func TestStateStore_GetJobStatus_NoEvalsOrAllocs(t *testing.T) {
	ci.Parallel(t)

	job := mock.Job()
	state := testStateStore(t)
	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusPending {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusPending)
	}
}

// TestStateStore_GetJobStatus_NoEvalsOrAllocs_Periodic: a periodic job with
// no evals or allocs is considered running.
func TestStateStore_GetJobStatus_NoEvalsOrAllocs_Periodic(t *testing.T) {
	ci.Parallel(t)

	job := mock.PeriodicJob()
	state := testStateStore(t)
	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusRunning {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusRunning)
	}
}

// TestStateStore_GetJobStatus_NoEvalsOrAllocs_EvalDelete: with evalDelete
// set, a job with no evals or allocs is dead.
func TestStateStore_GetJobStatus_NoEvalsOrAllocs_EvalDelete(t *testing.T) {
	ci.Parallel(t)

	job := mock.Job()
	state := testStateStore(t)
	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}

// TestStateStore_GetJobStatus_DeadEvalsAndAllocs: only terminal evals and
// allocs exist, so the job is dead.
func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()

	// Create a mock alloc that is dead.
	alloc := mock.Alloc()
	alloc.JobID = job.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a mock eval that is complete
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Status = structs.EvalStatusComplete
	if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}

// TestStateStore_GetJobStatus_RunningAlloc: a non-terminal alloc keeps the
// job running even with evalDelete set.
func TestStateStore_GetJobStatus_RunningAlloc(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()

	// Create a mock alloc that is running.
alloc := mock.Alloc()
	alloc.JobID = job.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusRun
	state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusRunning {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusRunning)
	}
}

// TestStateStore_GetJobStatus_PeriodicJob: a periodic job is running until
// stopped, then dead.
func TestStateStore_GetJobStatus_PeriodicJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.PeriodicJob()

	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusRunning {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusRunning)
	}

	// Mark it as stopped
	job.Stop = true
	status, err = state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}

// TestStateStore_GetJobStatus_ParameterizedJob: a parameterized job is
// running until stopped, then dead.
func TestStateStore_GetJobStatus_ParameterizedJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()
	job.ParameterizedJob = &structs.ParameterizedJobConfig{}

	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusRunning {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusRunning)
	}

	// Mark it as stopped
	job.Stop = true
	status, err = state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}

// TestStateStore_SetJobStatus_PendingEval: a pending eval keeps the job
// pending even with evalDelete set.
func TestStateStore_SetJobStatus_PendingEval(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.Job()

	// Create a mock eval that is pending.
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Status = structs.EvalStatusPending
	if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusPending {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusPending)
	}
}

// TestStateStore_SetJobStatus_SystemJob asserts that system jobs are still
// considered running until explicitly stopped.
func TestStateStore_SetJobStatus_SystemJob(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	job := mock.SystemJob()

	// Create a mock eval that is complete.
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Type = job.Type
	eval.Status = structs.EvalStatusComplete
	if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.ReadTxn()
	status, err := state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if expected := structs.JobStatusRunning; status != expected {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, expected)
	}

	// Stop the job
	job.Stop = true
	status, err = state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if expected := structs.JobStatusDead; status != expected {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, expected)
	}
}

// TestStateJobSummary_UpdateJobCount verifies that the job summary tracks
// allocation counts as allocs are inserted and updated.
func TestStateJobSummary_UpdateJobCount(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	alloc := mock.Alloc()
	job := alloc.Job
	job.TaskGroups[0].Count = 3

	// Create watchsets so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.JobSummaryByID(ws, job.Namespace, job.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	summary, _ := state.JobSummaryByID(ws, job.Namespace, job.ID)
	expectedSummary := structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Starting: 1,
			},
		},
		Children: 
new(structs.JobChildrenSummary),
		CreateIndex: 1000,
		ModifyIndex: 1001,
	}
	if !reflect.DeepEqual(summary, &expectedSummary) {
		t.Fatalf("expected: %v, actual: %v", expectedSummary, summary)
	}

	// Create watchsets so we can test that upsert fires the watch
	ws2 := memdb.NewWatchSet()
	if _, err := state.JobSummaryByID(ws2, job.Namespace, job.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	alloc2 := mock.Alloc()
	alloc2.Job = job
	alloc2.JobID = job.ID

	alloc3 := mock.Alloc()
	alloc3.Job = job
	alloc3.JobID = job.ID

	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc2, alloc3}); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws2) {
		t.Fatalf("bad")
	}

	outA, _ := state.AllocByID(ws, alloc3.ID)

	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	expectedSummary = structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Starting: 3,
			},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: job.CreateIndex,
		ModifyIndex: outA.ModifyIndex,
	}
	if !reflect.DeepEqual(summary, &expectedSummary) {
		t.Fatalf("expected summary: %v, actual: %v", expectedSummary, summary)
	}

	// Create watchsets so we can test that the client update fires the watch
	ws3 := memdb.NewWatchSet()
	if _, err := state.JobSummaryByID(ws3, job.Namespace, job.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	alloc4 := mock.Alloc()
	alloc4.ID = alloc2.ID
	alloc4.Job = alloc2.Job
	alloc4.JobID = alloc2.JobID
	alloc4.ClientStatus = structs.AllocClientStatusComplete

	alloc5 := mock.Alloc()
	alloc5.ID = alloc3.ID
	alloc5.Job = alloc3.Job
	alloc5.JobID = alloc3.JobID
	alloc5.ClientStatus = structs.AllocClientStatusComplete

	if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc4, alloc5}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// BUGFIX: the original checked ws2 here, which had already fired on the
	// 1002 upsert above; ws3 is the watchset registered for this update.
	if !watchFired(ws3) {
		t.Fatalf("bad")
	}

	outA, _ = state.AllocByID(ws, alloc5.ID)
	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	expectedSummary = structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Complete: 2,
				Starting: 1,
			},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: job.CreateIndex,
		ModifyIndex: outA.ModifyIndex,
	}
	if !reflect.DeepEqual(summary, &expectedSummary) {
		t.Fatalf("expected: %v, actual: %v", expectedSummary, summary)
	}
}

// TestJobSummary_UpdateClientStatus verifies that client status updates move
// allocations between the summary's per-status counters.
func TestJobSummary_UpdateClientStatus(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	alloc := mock.Alloc()
	job := alloc.Job
	job.TaskGroups[0].Count = 3

	alloc2 := mock.Alloc()
	alloc2.Job = job
	alloc2.JobID = job.ID

	alloc3 := mock.Alloc()
	alloc3.Job = job
	alloc3.JobID = job.ID

	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2, alloc3}); err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	summary, _ := state.JobSummaryByID(ws, job.Namespace, job.ID)
	if summary.Summary["web"].Starting != 3 {
		t.Fatalf("bad job summary: %v", summary)
	}

	alloc4 := mock.Alloc()
	alloc4.ID = alloc2.ID
	alloc4.Job = alloc2.Job
	alloc4.JobID = alloc2.JobID
	alloc4.ClientStatus = structs.AllocClientStatusComplete

	alloc5 := mock.Alloc()
	alloc5.ID = alloc3.ID
	alloc5.Job = alloc3.Job
	alloc5.JobID = alloc3.JobID
	alloc5.ClientStatus = structs.AllocClientStatusFailed

	alloc6 := mock.Alloc()
	alloc6.ID = alloc.ID
	alloc6.Job = alloc.Job
	alloc6.JobID = alloc.JobID
	alloc6.ClientStatus = structs.AllocClientStatusRunning

	if err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc4, alloc5, alloc6}); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	if summary.Summary["web"].Running != 1 || summary.Summary["web"].Failed != 1 || summary.Summary["web"].Complete != 1 {
		t.Fatalf("bad job summary: %v", summary)
	}

	alloc7 := mock.Alloc()
	alloc7.Job = alloc.Job
	alloc7.JobID = alloc.JobID

	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc7}); err != nil {
		t.Fatalf("err: %v", err)
	}
	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	if summary.Summary["web"].Starting != 1 || summary.Summary["web"].Running != 1 || summary.Summary["web"].Failed != 1 || summary.Summary["web"].Complete != 1 {
		t.Fatalf("bad job summary: %v", summary)
	}
}

// Test that nonexistent deployment can't be updated
func TestStateStore_UpsertDeploymentStatusUpdate_Nonexistent(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Update the nonexistent deployment
	req := &structs.DeploymentStatusUpdateRequest{
		DeploymentUpdate: &structs.DeploymentStatusUpdate{
			DeploymentID: uuid.Generate(),
			Status:       structs.DeploymentStatusRunning,
		},
	}
	err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req)
	if err == nil || !strings.Contains(err.Error(), "does not exist") {
		t.Fatalf("expected error updating the status because the deployment doesn't exist")
	}
}

// Test that terminal
deployment can't be updated
func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert a terminal deployment
	d := mock.Deployment()
	d.Status = structs.DeploymentStatusFailed

	if err := state.UpsertDeployment(1, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Attempt to move the terminal deployment back to running; this must fail
	req := &structs.DeploymentStatusUpdateRequest{
		DeploymentUpdate: &structs.DeploymentStatusUpdate{
			DeploymentID: d.ID,
			Status:       structs.DeploymentStatusRunning,
		},
	}
	err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req)
	if err == nil || !strings.Contains(err.Error(), "has terminal status") {
		t.Fatalf("expected error updating the status because the deployment is terminal")
	}
}

// Test that a non terminal deployment is updated and that a job and eval are
// created.
func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert a deployment
	d := mock.Deployment()
	if err := state.UpsertDeployment(1, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Create an eval and a job
	e := mock.Eval()
	j := mock.Job()

	// Update the deployment
	status, desc := structs.DeploymentStatusFailed, "foo"
	req := &structs.DeploymentStatusUpdateRequest{
		DeploymentUpdate: &structs.DeploymentStatusUpdate{
			DeploymentID:      d.ID,
			Status:            status,
			StatusDescription: desc,
		},
		Job:  j,
		Eval: e,
	}
	err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 2, req)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check that the status was updated properly
	ws := memdb.NewWatchSet()
	dout, err := state.DeploymentByID(ws, d.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if dout.Status != status || dout.StatusDescription != desc {
		t.Fatalf("bad: %#v", dout)
	}

	// Check that the evaluation was created.
	// Fix: capture the lookup error instead of discarding it with `_` and
	// then re-checking the stale err from the previous call.
	eout, err := state.EvalByID(ws, e.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if eout == nil {
		t.Fatalf("bad: %#v", eout)
	}

	// Check that the job was created (same stale-err fix as above).
	jout, err := state.JobByID(ws, j.Namespace, j.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if jout == nil {
		t.Fatalf("bad: %#v", jout)
	}
}

// Test that when a deployment is updated to successful the job is updated to
// stable
func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert a job
	job := mock.Job()
	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Insert a deployment
	d := structs.NewDeployment(job, 50)
	if err := state.UpsertDeployment(2, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Update the deployment to successful
	req := &structs.DeploymentStatusUpdateRequest{
		DeploymentUpdate: &structs.DeploymentStatusUpdate{
			DeploymentID:      d.ID,
			Status:            structs.DeploymentStatusSuccessful,
			StatusDescription: structs.DeploymentStatusDescriptionSuccessful,
		},
	}
	err := state.UpdateDeploymentStatus(structs.MsgTypeTestSetup, 3, req)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check that the status was updated properly
	ws := memdb.NewWatchSet()
	dout, err := state.DeploymentByID(ws, d.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if dout.Status != structs.DeploymentStatusSuccessful ||
		dout.StatusDescription != structs.DeploymentStatusDescriptionSuccessful {
		t.Fatalf("bad: %#v", dout)
	}

	// Check that the job exists and was marked stable.
	// Fix: capture the lookup error instead of discarding it and checking
	// the stale err from the previous call.
	jout, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if jout == nil {
		t.Fatalf("bad: %#v", jout)
	}
	if !jout.Stable {
		t.Fatalf("job not marked stable %#v", jout)
	}
	if jout.Version != d.JobVersion {
		t.Fatalf("job version changed; got %d; want %d", jout.Version, d.JobVersion)
	}
}

func TestStateStore_UpdateJobStability(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert a job twice to get two versions
	job := mock.Job()
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job))

	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 2, nil, job.Copy()))

	// Update the stability to true
	err := state.UpdateJobStability(3, job.Namespace, job.ID, 0, true)
	require.NoError(t, err)

	// Check that the job was updated properly
	ws := memdb.NewWatchSet()
	jout, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0)
	require.NoError(t, err)
	require.NotNil(t, jout)
	require.True(t, jout.Stable, "job not marked as stable")

	// Update the stability to false.
	// Consistency: use require.NoError like the rest of this test instead of
	// the lone t.Fatalf branch.
	err = state.UpdateJobStability(3, job.Namespace, job.ID, 0, false)
	require.NoError(t, err)

	// Check that the job was updated properly
	jout, err = state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0)
	require.NoError(t, err)
	require.NotNil(t, jout)
	require.False(t, jout.Stable)
}

// Test that nonexistent deployment can't be promoted
func TestStateStore_UpsertDeploymentPromotion_Nonexistent(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Promote the nonexistent deployment
	req := &structs.ApplyDeploymentPromoteRequest{
		DeploymentPromoteRequest: structs.DeploymentPromoteRequest{
			DeploymentID: uuid.Generate(),
			All:          true,
		},
	}
	err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, req)
	if err == nil || !strings.Contains(err.Error(), "does not exist") {
		t.Fatalf("expected error promoting because the deployment doesn't exist")
	}
}

// Test that terminal deployment can't be updated
func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert a terminal deployment
	d := mock.Deployment()
	d.Status = structs.DeploymentStatusFailed

	if err := state.UpsertDeployment(1, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Promote the deployment; must fail because it is terminal
	req := &structs.ApplyDeploymentPromoteRequest{
		DeploymentPromoteRequest: structs.DeploymentPromoteRequest{
			DeploymentID: d.ID,
			All:          true,
		},
	}
	err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 2, req)
	if err == nil || !strings.Contains(err.Error(), "has terminal status") {
		t.Fatalf("expected error updating the status because the deployment is terminal: %v", err)
	}
}

// Test promoting unhealthy canaries in a deployment.
8134 func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { 8135 ci.Parallel(t) 8136 8137 state := testStateStore(t) 8138 require := require.New(t) 8139 8140 // Create a job 8141 j := mock.Job() 8142 require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j)) 8143 8144 // Create a deployment 8145 d := mock.Deployment() 8146 d.JobID = j.ID 8147 d.TaskGroups["web"].DesiredCanaries = 2 8148 require.Nil(state.UpsertDeployment(2, d)) 8149 8150 // Create a set of allocations 8151 c1 := mock.Alloc() 8152 c1.JobID = j.ID 8153 c1.DeploymentID = d.ID 8154 d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) 8155 c2 := mock.Alloc() 8156 c2.JobID = j.ID 8157 c2.DeploymentID = d.ID 8158 d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) 8159 8160 // Create a healthy but terminal alloc 8161 c3 := mock.Alloc() 8162 c3.JobID = j.ID 8163 c3.DeploymentID = d.ID 8164 c3.DesiredStatus = structs.AllocDesiredStatusStop 8165 c3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} 8166 d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) 8167 8168 require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2, c3})) 8169 8170 // Promote the canaries 8171 req := &structs.ApplyDeploymentPromoteRequest{ 8172 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 8173 DeploymentID: d.ID, 8174 All: true, 8175 }, 8176 } 8177 err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) 8178 require.NotNil(err) 8179 require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`) 8180 } 8181 8182 // Test promoting a deployment with no canaries 8183 func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { 8184 ci.Parallel(t) 8185 8186 state := testStateStore(t) 8187 require := require.New(t) 8188 8189 // Create a job 8190 j := mock.Job() 
8191 require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j)) 8192 8193 // Create a deployment 8194 d := mock.Deployment() 8195 d.TaskGroups["web"].DesiredCanaries = 2 8196 d.JobID = j.ID 8197 require.Nil(state.UpsertDeployment(2, d)) 8198 8199 // Promote the canaries 8200 req := &structs.ApplyDeploymentPromoteRequest{ 8201 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 8202 DeploymentID: d.ID, 8203 All: true, 8204 }, 8205 } 8206 err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) 8207 require.NotNil(err) 8208 require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`) 8209 } 8210 8211 // Test promoting all canaries in a deployment. 8212 func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { 8213 ci.Parallel(t) 8214 8215 state := testStateStore(t) 8216 8217 // Create a job with two task groups 8218 j := mock.Job() 8219 tg1 := j.TaskGroups[0] 8220 tg2 := tg1.Copy() 8221 tg2.Name = "foo" 8222 j.TaskGroups = append(j.TaskGroups, tg2) 8223 if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j); err != nil { 8224 t.Fatalf("bad: %v", err) 8225 } 8226 8227 // Create a deployment 8228 d := mock.Deployment() 8229 d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion 8230 d.JobID = j.ID 8231 d.TaskGroups = map[string]*structs.DeploymentState{ 8232 "web": { 8233 DesiredTotal: 10, 8234 DesiredCanaries: 1, 8235 }, 8236 "foo": { 8237 DesiredTotal: 10, 8238 DesiredCanaries: 1, 8239 }, 8240 } 8241 if err := state.UpsertDeployment(2, d); err != nil { 8242 t.Fatalf("bad: %v", err) 8243 } 8244 8245 // Create a set of allocations 8246 c1 := mock.Alloc() 8247 c1.JobID = j.ID 8248 c1.DeploymentID = d.ID 8249 d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) 8250 c1.DeploymentStatus = &structs.AllocDeploymentStatus{ 8251 Healthy: pointer.Of(true), 8252 } 8253 c2 := mock.Alloc() 8254 c2.JobID = j.ID 8255 c2.DeploymentID = d.ID 8256 
d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) 8257 c2.TaskGroup = tg2.Name 8258 c2.DeploymentStatus = &structs.AllocDeploymentStatus{ 8259 Healthy: pointer.Of(true), 8260 } 8261 8262 if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { 8263 t.Fatalf("err: %v", err) 8264 } 8265 8266 // Create an eval 8267 e := mock.Eval() 8268 8269 // Promote the canaries 8270 req := &structs.ApplyDeploymentPromoteRequest{ 8271 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 8272 DeploymentID: d.ID, 8273 All: true, 8274 }, 8275 Eval: e, 8276 } 8277 err := state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req) 8278 if err != nil { 8279 t.Fatalf("bad: %v", err) 8280 } 8281 8282 // Check that the status per task group was updated properly 8283 ws := memdb.NewWatchSet() 8284 dout, err := state.DeploymentByID(ws, d.ID) 8285 if err != nil { 8286 t.Fatalf("bad: %v", err) 8287 } 8288 if dout.StatusDescription != structs.DeploymentStatusDescriptionRunning { 8289 t.Fatalf("status description not updated: got %v; want %v", dout.StatusDescription, structs.DeploymentStatusDescriptionRunning) 8290 } 8291 if len(dout.TaskGroups) != 2 { 8292 t.Fatalf("bad: %#v", dout.TaskGroups) 8293 } 8294 for tg, state := range dout.TaskGroups { 8295 if !state.Promoted { 8296 t.Fatalf("bad: group %q not promoted %#v", tg, state) 8297 } 8298 } 8299 8300 // Check that the evaluation was created 8301 eout, _ := state.EvalByID(ws, e.ID) 8302 if err != nil { 8303 t.Fatalf("bad: %v", err) 8304 } 8305 if eout == nil { 8306 t.Fatalf("bad: %#v", eout) 8307 } 8308 } 8309 8310 // Test promoting a subset of canaries in a deployment. 
func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) {
	ci.Parallel(t)
	require := require.New(t)

	state := testStateStore(t)

	// Create a job with two task groups
	j := mock.Job()
	tg1 := j.TaskGroups[0]
	tg2 := tg1.Copy()
	tg2.Name = "foo"
	j.TaskGroups = append(j.TaskGroups, tg2)
	require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, j))

	// Create a deployment with one desired canary per group
	d := mock.Deployment()
	d.JobID = j.ID
	d.TaskGroups = map[string]*structs.DeploymentState{
		"web": {
			DesiredTotal:    10,
			DesiredCanaries: 1,
		},
		"foo": {
			DesiredTotal:    10,
			DesiredCanaries: 1,
		},
	}
	require.Nil(state.UpsertDeployment(2, d))

	// Create a set of allocations for both groups, including an unhealthy one
	c1 := mock.Alloc()
	c1.JobID = j.ID
	c1.DeploymentID = d.ID
	d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID)
	c1.DeploymentStatus = &structs.AllocDeploymentStatus{
		Healthy: pointer.Of(true),
		Canary:  true,
	}

	// Should still be a canary after promotion (its group is not promoted)
	c2 := mock.Alloc()
	c2.JobID = j.ID
	c2.DeploymentID = d.ID
	// NOTE(review): c2 is registered as a placed canary under its original
	// task group before TaskGroup is reassigned to tg2.Name — confirm this
	// ordering is intentional.
	d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID)
	c2.TaskGroup = tg2.Name
	c2.DeploymentStatus = &structs.AllocDeploymentStatus{
		Healthy: pointer.Of(true),
		Canary:  true,
	}

	// Unhealthy canary
	c3 := mock.Alloc()
	c3.JobID = j.ID
	c3.DeploymentID = d.ID
	d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID)
	c3.DeploymentStatus = &structs.AllocDeploymentStatus{
		Healthy: pointer.Of(false),
		Canary:  true,
	}

	require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2, c3}))

	// Create an eval
	e := mock.Eval()

	// Promote only the "web" group's canaries
	req := &structs.ApplyDeploymentPromoteRequest{
		DeploymentPromoteRequest: structs.DeploymentPromoteRequest{
			DeploymentID: d.ID,
			Groups:       []string{"web"},
		},
		Eval: e,
	}
	require.Nil(state.UpdateDeploymentPromotion(structs.MsgTypeTestSetup, 4, req))

	// Check that the status per task group was updated properly
	ws := memdb.NewWatchSet()
	dout, err := state.DeploymentByID(ws, d.ID)
	require.Nil(err)
	require.Len(dout.TaskGroups, 2)
	require.Contains(dout.TaskGroups, "web")
	require.True(dout.TaskGroups["web"].Promoted)

	// Check that the evaluation was created
	eout, err := state.EvalByID(ws, e.ID)
	require.Nil(err)
	require.NotNil(eout)

	// Check the canary field was set properly
	aout1, err1 := state.AllocByID(ws, c1.ID)
	aout2, err2 := state.AllocByID(ws, c2.ID)
	aout3, err3 := state.AllocByID(ws, c3.ID)
	require.Nil(err1)
	require.Nil(err2)
	require.Nil(err3)
	require.NotNil(aout1)
	require.NotNil(aout2)
	require.NotNil(aout3)
	require.False(aout1.DeploymentStatus.Canary)
	require.True(aout2.DeploymentStatus.Canary)
	require.True(aout3.DeploymentStatus.Canary)
}

// Test that allocation health can't be set against a nonexistent deployment
func TestStateStore_UpsertDeploymentAllocHealth_Nonexistent(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Set health against the nonexistent deployment
	req := &structs.ApplyDeploymentAllocHealthRequest{
		DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{
			DeploymentID:         uuid.Generate(),
			HealthyAllocationIDs: []string{uuid.Generate()},
		},
	}
	err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req)
	if err == nil || !strings.Contains(err.Error(), "does not exist") {
		t.Fatalf("expected error because the deployment doesn't exist: %v", err)
	}
}

// Test that allocation health can't be set against a terminal deployment
func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert a terminal deployment
	d := mock.Deployment()
	d.Status = structs.DeploymentStatusFailed

	if err := state.UpsertDeployment(1, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Set health against the terminal deployment
	req := &structs.ApplyDeploymentAllocHealthRequest{
		DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{
			DeploymentID:         d.ID,
			HealthyAllocationIDs: []string{uuid.Generate()},
		},
	}
	err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req)
	if err == nil || !strings.Contains(err.Error(), "has terminal status") {
		t.Fatalf("expected error because the deployment is terminal: %v", err)
	}
}

// Test that allocation health can't be set against a nonexistent alloc
func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert a deployment
	d := mock.Deployment()
	if err := state.UpsertDeployment(1, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Set health for an alloc ID that was never inserted
	req := &structs.ApplyDeploymentAllocHealthRequest{
		DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{
			DeploymentID:         d.ID,
			HealthyAllocationIDs: []string{uuid.Generate()},
		},
	}
	err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 2, req)
	if err == nil || !strings.Contains(err.Error(), "unknown alloc") {
		t.Fatalf("expected error because the alloc doesn't exist: %v", err)
	}
}

// Test that a deployments PlacedCanaries is properly updated
func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Create a deployment
	d1 := mock.Deployment()
	require.NoError(t, state.UpsertDeployment(2, d1))

	// Create a Job
	job := mock.Job()
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, nil, job))

	// Create alloc with canary status
	a := mock.Alloc()
	a.JobID = job.ID
	a.DeploymentID = d1.ID
	a.DeploymentStatus = &structs.AllocDeploymentStatus{
		Healthy: pointer.Of(false),
		Canary:  true,
	}
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a}))

	// Pull the deployment from state
	ws := memdb.NewWatchSet()
	deploy, err := state.DeploymentByID(ws, d1.ID)
	require.NoError(t, err)

	// Ensure that PlacedCanaries is accurate
	require.Equal(t, 1, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries))

	// Create alloc without canary status
	b := mock.Alloc()
	b.JobID = job.ID
	b.DeploymentID = d1.ID
	b.DeploymentStatus = &structs.AllocDeploymentStatus{
		Healthy: pointer.Of(false),
		Canary:  false,
	}
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{b}))

	// Pull the deployment from state
	ws = memdb.NewWatchSet()
	deploy, err = state.DeploymentByID(ws, d1.ID)
	require.NoError(t, err)

	// Ensure that PlacedCanaries is still only the canary alloc
	require.Equal(t, 1, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries))

	// Create a second deployment
	d2 := mock.Deployment()
	require.NoError(t, state.UpsertDeployment(5, d2))

	// Canary alloc in the second deployment
	c := mock.Alloc()
	c.JobID = job.ID
	c.DeploymentID = d2.ID
	c.DeploymentStatus = &structs.AllocDeploymentStatus{
		Healthy: pointer.Of(false),
		Canary:  true,
	}
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, []*structs.Allocation{c}))

	ws = memdb.NewWatchSet()
	deploy2, err := state.DeploymentByID(ws, d2.ID)
	require.NoError(t, err)

	// Ensure that PlacedCanaries is accurate
	require.Equal(t, 1, len(deploy2.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries))
}

func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Create a deployment
	d1 := mock.Deployment()
	require.NoError(t, state.UpsertDeployment(2, d1))

	// Create a Job
	job := mock.Job()
	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 3, nil, job))

	// Create a healthy alloc that is NOT a canary
	a := mock.Alloc()
	a.JobID = job.ID
	a.DeploymentID = d1.ID
	a.DeploymentStatus = &structs.AllocDeploymentStatus{
		Healthy: pointer.Of(true),
		Canary:  false,
	}
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a}))

	// Pull the deployment from state
	ws := memdb.NewWatchSet()
	deploy, err := state.DeploymentByID(ws, d1.ID)
	require.NoError(t, err)

	// Ensure that PlacedCanaries is accurate (no canaries placed)
	require.Equal(t, 0, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries))
}

// Test that allocation health can't be set for an alloc with mismatched
// deployment ids
func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert two deployment
	d1 := mock.Deployment()
	d2 := mock.Deployment()
	if err := state.UpsertDeployment(1, d1); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if err := state.UpsertDeployment(2, d2); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Insert an alloc for a random deployment
	a := mock.Alloc()
	a.DeploymentID = d1.ID
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3,
		[]*structs.Allocation{a}); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Set health against the deployment the alloc does NOT belong to
	req := &structs.ApplyDeploymentAllocHealthRequest{
		DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{
			DeploymentID:         d2.ID,
			HealthyAllocationIDs: []string{a.ID},
		},
	}
	err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 4, req)
	if err == nil || !strings.Contains(err.Error(), "not part of deployment") {
		t.Fatalf("expected error because the alloc isn't part of the deployment: %v", err)
	}
}

// Test that allocation health is properly set
func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)

	// Insert a deployment
	d := mock.Deployment()
	d.TaskGroups["web"].ProgressDeadline = 5 * time.Minute
	if err := state.UpsertDeployment(1, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Insert two allocations
	a1 := mock.Alloc()
	a1.DeploymentID = d.ID
	a2 := mock.Alloc()
	a2.DeploymentID = d.ID
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{a1, a2}); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Create a job to roll back to
	j := mock.Job()

	// Create an eval that should be upserted
	e := mock.Eval()

	// Create a status update for the deployment
	status, desc := structs.DeploymentStatusFailed, "foo"
	u := &structs.DeploymentStatusUpdate{
		DeploymentID:      d.ID,
		Status:            status,
		StatusDescription: desc,
	}

	// Capture the time for the update
	ts := time.Now()

	// Set health against the deployment
	req := &structs.ApplyDeploymentAllocHealthRequest{
		DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{
			DeploymentID:         d.ID,
			HealthyAllocationIDs: []string{a1.ID},
UnhealthyAllocationIDs: []string{a2.ID}, 8669 }, 8670 Job: j, 8671 Eval: e, 8672 DeploymentUpdate: u, 8673 Timestamp: ts, 8674 } 8675 err := state.UpdateDeploymentAllocHealth(structs.MsgTypeTestSetup, 3, req) 8676 if err != nil { 8677 t.Fatalf("bad: %v", err) 8678 } 8679 8680 // Check that the status was updated properly 8681 ws := memdb.NewWatchSet() 8682 dout, err := state.DeploymentByID(ws, d.ID) 8683 if err != nil { 8684 t.Fatalf("bad: %v", err) 8685 } 8686 if dout.Status != status || dout.StatusDescription != desc { 8687 t.Fatalf("bad: %#v", dout) 8688 } 8689 8690 // Check that the evaluation was created 8691 eout, _ := state.EvalByID(ws, e.ID) 8692 if err != nil { 8693 t.Fatalf("bad: %v", err) 8694 } 8695 if eout == nil { 8696 t.Fatalf("bad: %#v", eout) 8697 } 8698 8699 // Check that the job was created 8700 jout, _ := state.JobByID(ws, j.Namespace, j.ID) 8701 if err != nil { 8702 t.Fatalf("bad: %v", err) 8703 } 8704 if jout == nil { 8705 t.Fatalf("bad: %#v", jout) 8706 } 8707 8708 // Check the status of the allocs 8709 out1, err := state.AllocByID(ws, a1.ID) 8710 if err != nil { 8711 t.Fatalf("err: %v", err) 8712 } 8713 out2, err := state.AllocByID(ws, a2.ID) 8714 if err != nil { 8715 t.Fatalf("err: %v", err) 8716 } 8717 8718 if !out1.DeploymentStatus.IsHealthy() { 8719 t.Fatalf("bad: alloc %q not healthy", out1.ID) 8720 } 8721 if !out2.DeploymentStatus.IsUnhealthy() { 8722 t.Fatalf("bad: alloc %q not unhealthy", out2.ID) 8723 } 8724 8725 if !out1.DeploymentStatus.Timestamp.Equal(ts) { 8726 t.Fatalf("bad: alloc %q had timestamp %v; want %v", out1.ID, out1.DeploymentStatus.Timestamp, ts) 8727 } 8728 if !out2.DeploymentStatus.Timestamp.Equal(ts) { 8729 t.Fatalf("bad: alloc %q had timestamp %v; want %v", out2.ID, out2.DeploymentStatus.Timestamp, ts) 8730 } 8731 } 8732 8733 func TestStateStore_UpsertVaultAccessors(t *testing.T) { 8734 ci.Parallel(t) 8735 8736 state := testStateStore(t) 8737 a := mock.VaultAccessor() 8738 a2 := mock.VaultAccessor() 8739 8740 ws 
	:= memdb.NewWatchSet()
	if _, err := state.VaultAccessor(ws, a.Accessor); err != nil {
		t.Fatalf("err: %v", err)
	}

	if _, err := state.VaultAccessor(ws, a2.Accessor); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Upserting must fire the watch registered above
	err := state.UpsertVaultAccessor(1000, []*structs.VaultAccessor{a, a2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	// Both accessors must round-trip unchanged
	ws = memdb.NewWatchSet()
	out, err := state.VaultAccessor(ws, a.Accessor)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(a, out) {
		t.Fatalf("bad: %#v %#v", a, out)
	}

	out, err = state.VaultAccessor(ws, a2.Accessor)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(a2, out) {
		t.Fatalf("bad: %#v %#v", a2, out)
	}

	// Iterate the whole table; exactly the two accessors must appear
	iter, err := state.VaultAccessors(ws)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	count := 0
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}

		count++
		accessor := raw.(*structs.VaultAccessor)

		if !reflect.DeepEqual(accessor, a) && !reflect.DeepEqual(accessor, a2) {
			t.Fatalf("bad: %#v", accessor)
		}
	}

	if count != 2 {
		t.Fatalf("bad: %d", count)
	}

	// Table index must reflect the upsert index
	index, err := state.Index("vault_accessors")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

func TestStateStore_DeleteVaultAccessors(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	a1 := mock.VaultAccessor()
	a2 := mock.VaultAccessor()
	accessors := []*structs.VaultAccessor{a1, a2}

	err := state.UpsertVaultAccessor(1000, accessors)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	if _, err :=
		state.VaultAccessor(ws, a1.Accessor); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Deleting must fire the watch registered above
	err = state.DeleteVaultAccessors(1001, accessors)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	// Both accessors must be gone after deletion
	ws = memdb.NewWatchSet()
	out, err := state.VaultAccessor(ws, a1.Accessor)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %#v %#v", a1, out)
	}
	out, err = state.VaultAccessor(ws, a2.Accessor)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %#v %#v", a2, out)
	}

	// Table index must reflect the delete index
	index, err := state.Index("vault_accessors")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

func TestStateStore_VaultAccessorsByAlloc(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	alloc := mock.Alloc()
	var accessors []*structs.VaultAccessor
	var expected []*structs.VaultAccessor

	// Five accessors that belong to the alloc
	for i := 0; i < 5; i++ {
		accessor := mock.VaultAccessor()
		accessor.AllocID = alloc.ID
		expected = append(expected, accessor)
		accessors = append(accessors, accessor)
	}

	// Ten accessors that do not belong to the alloc
	for i := 0; i < 10; i++ {
		accessor := mock.VaultAccessor()
		accessors = append(accessors, accessor)
	}

	err := state.UpsertVaultAccessor(1000, accessors)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup by alloc must return only the five matching accessors
	ws := memdb.NewWatchSet()
	out, err := state.VaultAccessorsByAlloc(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(expected) != len(out) {
		t.Fatalf("bad: %#v %#v", len(expected), len(out))
	}

	index, err := state.Index("vault_accessors")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d",
			index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

func TestStateStore_VaultAccessorsByNode(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	node := mock.Node()
	var accessors []*structs.VaultAccessor
	var expected []*structs.VaultAccessor

	// Five accessors that belong to the node
	for i := 0; i < 5; i++ {
		accessor := mock.VaultAccessor()
		accessor.NodeID = node.ID
		expected = append(expected, accessor)
		accessors = append(accessors, accessor)
	}

	// Ten accessors that do not belong to the node
	for i := 0; i < 10; i++ {
		accessor := mock.VaultAccessor()
		accessors = append(accessors, accessor)
	}

	err := state.UpsertVaultAccessor(1000, accessors)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup by node must return only the five matching accessors
	ws := memdb.NewWatchSet()
	out, err := state.VaultAccessorsByNode(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(expected) != len(out) {
		t.Fatalf("bad: %#v %#v", len(expected), len(out))
	}

	index, err := state.Index("vault_accessors")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

func TestStateStore_UpsertSITokenAccessors(t *testing.T) {
	ci.Parallel(t)
	r := require.New(t)

	state := testStateStore(t)
	a1 := mock.SITokenAccessor()
	a2 := mock.SITokenAccessor()

	ws := memdb.NewWatchSet()
	var err error

	// Register watches on both accessors before inserting
	_, err = state.SITokenAccessor(ws, a1.AccessorID)
	r.NoError(err)

	_, err = state.SITokenAccessor(ws, a2.AccessorID)
	r.NoError(err)

	err = state.UpsertSITokenAccessors(1000, []*structs.SITokenAccessor{a1, a2})
	r.NoError(err)

	wsFired := watchFired(ws)
	r.True(wsFired)

	// Both accessors must round-trip unchanged
	noInsertWS := memdb.NewWatchSet()
	result1, err := state.SITokenAccessor(noInsertWS, a1.AccessorID)
	r.NoError(err)
	r.Equal(a1, result1)

	result2, err := state.SITokenAccessor(noInsertWS, a2.AccessorID)
	r.NoError(err)
	r.Equal(a2, result2)

	// Iterate the whole table; exactly the two accessors must appear
	iter, err := state.SITokenAccessors(noInsertWS)
	r.NoError(err)

	count := 0
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		count++
		accessor := raw.(*structs.SITokenAccessor)
		// iterator is sorted by dynamic UUID
		matches := reflect.DeepEqual(a1, accessor) || reflect.DeepEqual(a2, accessor)
		r.True(matches)
	}
	r.Equal(2, count)

	// Table index must reflect the upsert index
	index, err := state.Index(siTokenAccessorTable)
	r.NoError(err)
	r.Equal(uint64(1000), index)

	noInsertWSFired := watchFired(noInsertWS)
	r.False(noInsertWSFired)
}

func TestStateStore_DeleteSITokenAccessors(t *testing.T) {
	ci.Parallel(t)
	r := require.New(t)

	state := testStateStore(t)
	a1 := mock.SITokenAccessor()
	a2 := mock.SITokenAccessor()
	accessors := []*structs.SITokenAccessor{a1, a2}
	var err error

	err = state.UpsertSITokenAccessors(1000, accessors)
	r.NoError(err)

	// Register a watch, then delete; the watch must fire
	ws := memdb.NewWatchSet()
	_, err = state.SITokenAccessor(ws, a1.AccessorID)
	r.NoError(err)

	err = state.DeleteSITokenAccessors(1001, accessors)
	r.NoError(err)

	wsFired := watchFired(ws)
	r.True(wsFired)

	wsPostDelete := memdb.NewWatchSet()

	result1, err := state.SITokenAccessor(wsPostDelete, a1.AccessorID)
	r.NoError(err)
	r.Nil(result1) // was deleted

	result2, err := state.SITokenAccessor(wsPostDelete, a2.AccessorID)
	r.NoError(err)
	r.Nil(result2) // was deleted

	// Table index must reflect the delete index
	index, err := state.Index(siTokenAccessorTable)
	r.NoError(err)
	r.Equal(uint64(1001), index)

	wsPostDeleteFired := watchFired(wsPostDelete)
	r.False(wsPostDeleteFired)
}

func TestStateStore_SITokenAccessorsByAlloc(t *testing.T) {
	ci.Parallel(t)
	r := require.New(t)
9063 9064 state := testStateStore(t) 9065 alloc := mock.Alloc() 9066 var accessors []*structs.SITokenAccessor 9067 var expected []*structs.SITokenAccessor 9068 9069 for i := 0; i < 5; i++ { 9070 accessor := mock.SITokenAccessor() 9071 accessor.AllocID = alloc.ID 9072 expected = append(expected, accessor) 9073 accessors = append(accessors, accessor) 9074 } 9075 9076 for i := 0; i < 10; i++ { 9077 accessor := mock.SITokenAccessor() 9078 accessor.AllocID = uuid.Generate() // does not belong to alloc 9079 accessors = append(accessors, accessor) 9080 } 9081 9082 err := state.UpsertSITokenAccessors(1000, accessors) 9083 r.NoError(err) 9084 9085 ws := memdb.NewWatchSet() 9086 result, err := state.SITokenAccessorsByAlloc(ws, alloc.ID) 9087 r.NoError(err) 9088 r.ElementsMatch(expected, result) 9089 9090 index, err := state.Index(siTokenAccessorTable) 9091 r.NoError(err) 9092 r.Equal(uint64(1000), index) 9093 9094 wsFired := watchFired(ws) 9095 r.False(wsFired) 9096 } 9097 9098 func TestStateStore_SITokenAccessorsByNode(t *testing.T) { 9099 ci.Parallel(t) 9100 r := require.New(t) 9101 9102 state := testStateStore(t) 9103 node := mock.Node() 9104 var accessors []*structs.SITokenAccessor 9105 var expected []*structs.SITokenAccessor 9106 var err error 9107 9108 for i := 0; i < 5; i++ { 9109 accessor := mock.SITokenAccessor() 9110 accessor.NodeID = node.ID 9111 expected = append(expected, accessor) 9112 accessors = append(accessors, accessor) 9113 } 9114 9115 for i := 0; i < 10; i++ { 9116 accessor := mock.SITokenAccessor() 9117 accessor.NodeID = uuid.Generate() // does not belong to node 9118 accessors = append(accessors, accessor) 9119 } 9120 9121 err = state.UpsertSITokenAccessors(1000, accessors) 9122 r.NoError(err) 9123 9124 ws := memdb.NewWatchSet() 9125 result, err := state.SITokenAccessorsByNode(ws, node.ID) 9126 r.NoError(err) 9127 r.ElementsMatch(expected, result) 9128 9129 index, err := state.Index(siTokenAccessorTable) 9130 r.NoError(err) 9131 r.Equal(uint64(1000), 
index)

	// No writes after the watch set was registered; it must not fire.
	wsFired := watchFired(ws)
	r.False(wsFired)
}

// TestStateStore_UpsertACLPolicy verifies that upserting ACL policies fires
// watches registered on the (previously missing) policy names, that the
// stored policies round-trip, and that the acl_policy table index advances.
func TestStateStore_UpsertACLPolicy(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	policy := mock.ACLPolicy()
	policy2 := mock.ACLPolicy()

	// Register watches on both policy names before they exist.
	ws := memdb.NewWatchSet()
	if _, err := state.ACLPolicyByName(ws, policy.Name); err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := state.ACLPolicyByName(ws, policy2.Name); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := state.UpsertACLPolicies(structs.MsgTypeTestSetup, 1000, []*structs.ACLPolicy{policy, policy2}); err != nil {
		t.Fatalf("err: %v", err)
	}
	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	// Re-read with a fresh watch set and check the values round-trip.
	ws = memdb.NewWatchSet()
	out, err := state.ACLPolicyByName(ws, policy.Name)
	assert.Equal(t, nil, err)
	assert.Equal(t, policy, out)

	out, err = state.ACLPolicyByName(ws, policy2.Name)
	assert.Equal(t, nil, err)
	assert.Equal(t, policy2, out)

	iter, err := state.ACLPolicies(ws)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we see both policies
	count := 0
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		count++
	}
	if count != 2 {
		t.Fatalf("bad: %d", count)
	}

	index, err := state.Index("acl_policy")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	// No writes after the second watch set was registered.
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_DeleteACLPolicy verifies that deleting ACL policies fires
// watches, removes the objects, and advances the table index.
func TestStateStore_DeleteACLPolicy(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	policy := mock.ACLPolicy()
	policy2 := mock.ACLPolicy()

	// Create the policy
	if err := state.UpsertACLPolicies(structs.MsgTypeTestSetup, 1000, []*structs.ACLPolicy{policy, policy2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watcher
	ws := memdb.NewWatchSet()
9213 if _, err := state.ACLPolicyByName(ws, policy.Name); err != nil { 9214 t.Fatalf("err: %v", err) 9215 } 9216 9217 // Delete the policy 9218 if err := state.DeleteACLPolicies(structs.MsgTypeTestSetup, 1001, []string{policy.Name, policy2.Name}); err != nil { 9219 t.Fatalf("err: %v", err) 9220 } 9221 9222 // Ensure watching triggered 9223 if !watchFired(ws) { 9224 t.Fatalf("bad") 9225 } 9226 9227 // Ensure we don't get the object back 9228 ws = memdb.NewWatchSet() 9229 out, err := state.ACLPolicyByName(ws, policy.Name) 9230 assert.Equal(t, nil, err) 9231 if out != nil { 9232 t.Fatalf("bad: %#v", out) 9233 } 9234 9235 iter, err := state.ACLPolicies(ws) 9236 if err != nil { 9237 t.Fatalf("err: %v", err) 9238 } 9239 9240 // Ensure we see neither policy 9241 count := 0 9242 for { 9243 raw := iter.Next() 9244 if raw == nil { 9245 break 9246 } 9247 count++ 9248 } 9249 if count != 0 { 9250 t.Fatalf("bad: %d", count) 9251 } 9252 9253 index, err := state.Index("acl_policy") 9254 if err != nil { 9255 t.Fatalf("err: %v", err) 9256 } 9257 if index != 1001 { 9258 t.Fatalf("bad: %d", index) 9259 } 9260 9261 if watchFired(ws) { 9262 t.Fatalf("bad") 9263 } 9264 } 9265 9266 func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { 9267 ci.Parallel(t) 9268 9269 state := testStateStore(t) 9270 names := []string{ 9271 "foo", 9272 "bar", 9273 "foobar", 9274 "foozip", 9275 "zip", 9276 } 9277 9278 // Create the policies 9279 var baseIndex uint64 = 1000 9280 for _, name := range names { 9281 p := mock.ACLPolicy() 9282 p.Name = name 9283 if err := state.UpsertACLPolicies(structs.MsgTypeTestSetup, baseIndex, []*structs.ACLPolicy{p}); err != nil { 9284 t.Fatalf("err: %v", err) 9285 } 9286 baseIndex++ 9287 } 9288 9289 // Scan by prefix 9290 iter, err := state.ACLPolicyByNamePrefix(nil, "foo") 9291 if err != nil { 9292 t.Fatalf("err: %v", err) 9293 } 9294 9295 // Ensure we see both policies 9296 count := 0 9297 out := []string{} 9298 for { 9299 raw := iter.Next() 9300 if raw == nil { 9301 
break 9302 } 9303 count++ 9304 out = append(out, raw.(*structs.ACLPolicy).Name) 9305 } 9306 if count != 3 { 9307 t.Fatalf("bad: %d %v", count, out) 9308 } 9309 sort.Strings(out) 9310 9311 expect := []string{"foo", "foobar", "foozip"} 9312 assert.Equal(t, expect, out) 9313 } 9314 9315 func TestStateStore_BootstrapACLTokens(t *testing.T) { 9316 ci.Parallel(t) 9317 9318 state := testStateStore(t) 9319 tk1 := mock.ACLToken() 9320 tk2 := mock.ACLToken() 9321 9322 ok, resetIdx, err := state.CanBootstrapACLToken() 9323 assert.Nil(t, err) 9324 assert.Equal(t, true, ok) 9325 assert.EqualValues(t, 0, resetIdx) 9326 9327 if err := state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1000, 0, tk1); err != nil { 9328 t.Fatalf("err: %v", err) 9329 } 9330 9331 out, err := state.ACLTokenByAccessorID(nil, tk1.AccessorID) 9332 assert.Equal(t, nil, err) 9333 assert.Equal(t, tk1, out) 9334 9335 ok, resetIdx, err = state.CanBootstrapACLToken() 9336 assert.Nil(t, err) 9337 assert.Equal(t, false, ok) 9338 assert.EqualValues(t, 1000, resetIdx) 9339 9340 if err := state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1001, 0, tk2); err == nil { 9341 t.Fatalf("expected error") 9342 } 9343 9344 iter, err := state.ACLTokens(nil, SortDefault) 9345 if err != nil { 9346 t.Fatalf("err: %v", err) 9347 } 9348 9349 // Ensure we see both policies 9350 count := 0 9351 for { 9352 raw := iter.Next() 9353 if raw == nil { 9354 break 9355 } 9356 count++ 9357 } 9358 if count != 1 { 9359 t.Fatalf("bad: %d", count) 9360 } 9361 9362 index, err := state.Index("acl_token") 9363 if err != nil { 9364 t.Fatalf("err: %v", err) 9365 } 9366 if index != 1000 { 9367 t.Fatalf("bad: %d", index) 9368 } 9369 index, err = state.Index("acl_token_bootstrap") 9370 if err != nil { 9371 t.Fatalf("err: %v", err) 9372 } 9373 if index != 1000 { 9374 t.Fatalf("bad: %d", index) 9375 } 9376 9377 // Should allow bootstrap with reset index 9378 if err := state.BootstrapACLTokens(structs.MsgTypeTestSetup, 1001, 1000, tk2); err != nil { 9379 
t.Fatalf("err %v", err) 9380 } 9381 9382 // Check we've modified the index 9383 index, err = state.Index("acl_token") 9384 if err != nil { 9385 t.Fatalf("err: %v", err) 9386 } 9387 if index != 1001 { 9388 t.Fatalf("bad: %d", index) 9389 } 9390 index, err = state.Index("acl_token_bootstrap") 9391 if err != nil { 9392 t.Fatalf("err: %v", err) 9393 } 9394 if index != 1001 { 9395 t.Fatalf("bad: %d", index) 9396 } 9397 } 9398 9399 func TestStateStore_UpsertACLTokens(t *testing.T) { 9400 ci.Parallel(t) 9401 9402 state := testStateStore(t) 9403 tk1 := mock.ACLToken() 9404 tk2 := mock.ACLToken() 9405 9406 ws := memdb.NewWatchSet() 9407 if _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID); err != nil { 9408 t.Fatalf("err: %v", err) 9409 } 9410 if _, err := state.ACLTokenByAccessorID(ws, tk2.AccessorID); err != nil { 9411 t.Fatalf("err: %v", err) 9412 } 9413 9414 if err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2}); err != nil { 9415 t.Fatalf("err: %v", err) 9416 } 9417 if !watchFired(ws) { 9418 t.Fatalf("bad") 9419 } 9420 9421 ws = memdb.NewWatchSet() 9422 out, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID) 9423 assert.Equal(t, nil, err) 9424 assert.Equal(t, tk1, out) 9425 9426 out, err = state.ACLTokenByAccessorID(ws, tk2.AccessorID) 9427 assert.Equal(t, nil, err) 9428 assert.Equal(t, tk2, out) 9429 9430 out, err = state.ACLTokenBySecretID(ws, tk1.SecretID) 9431 assert.Equal(t, nil, err) 9432 assert.Equal(t, tk1, out) 9433 9434 out, err = state.ACLTokenBySecretID(ws, tk2.SecretID) 9435 assert.Equal(t, nil, err) 9436 assert.Equal(t, tk2, out) 9437 9438 iter, err := state.ACLTokens(ws, SortDefault) 9439 if err != nil { 9440 t.Fatalf("err: %v", err) 9441 } 9442 9443 // Ensure we see both policies 9444 count := 0 9445 for { 9446 raw := iter.Next() 9447 if raw == nil { 9448 break 9449 } 9450 count++ 9451 } 9452 if count != 2 { 9453 t.Fatalf("bad: %d", count) 9454 } 9455 9456 index, err := state.Index("acl_token") 9457 if 
err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	// No writes after the watch set was registered; it must not fire.
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_DeleteACLTokens verifies that deleting ACL tokens fires
// watches registered on their accessor IDs, removes the objects, and
// advances the acl_token table index.
func TestStateStore_DeleteACLTokens(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	tk1 := mock.ACLToken()
	tk2 := mock.ACLToken()

	// Create the tokens
	if err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watcher
	ws := memdb.NewWatchSet()
	if _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Delete the token
	if err := state.DeleteACLTokens(structs.MsgTypeTestSetup, 1001, []string{tk1.AccessorID, tk2.AccessorID}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure watching triggered
	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	// Ensure we don't get the object back
	ws = memdb.NewWatchSet()
	out, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID)
	assert.Equal(t, nil, err)
	if out != nil {
		t.Fatalf("bad: %#v", out)
	}

	iter, err := state.ACLTokens(ws, SortDefault)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we see no tokens at all after the delete
	count := 0
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		count++
	}
	if count != 0 {
		t.Fatalf("bad: %d", count)
	}

	index, err := state.Index("acl_token")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	// No writes after the second watch set was registered.
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_ACLTokenByAccessorIDPrefix verifies prefix lookups over
// token accessor IDs in both default and reverse sort order.
func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) {
	ci.Parallel(t)

	state := testStateStore(t)
	prefixes := []string{
		"aaaa",
		"aabb",
		"bbbb",
		"bbcc",
9545 "ffff", 9546 } 9547 9548 // Create the tokens 9549 var baseIndex uint64 = 1000 9550 for _, prefix := range prefixes { 9551 tk := mock.ACLToken() 9552 tk.AccessorID = prefix + tk.AccessorID[4:] 9553 err := state.UpsertACLTokens(structs.MsgTypeTestSetup, baseIndex, []*structs.ACLToken{tk}) 9554 require.NoError(t, err) 9555 baseIndex++ 9556 } 9557 9558 gatherTokens := func(iter memdb.ResultIterator) []*structs.ACLToken { 9559 var tokens []*structs.ACLToken 9560 for { 9561 raw := iter.Next() 9562 if raw == nil { 9563 break 9564 } 9565 tokens = append(tokens, raw.(*structs.ACLToken)) 9566 } 9567 return tokens 9568 } 9569 9570 t.Run("scan by prefix", func(t *testing.T) { 9571 iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa", SortDefault) 9572 require.NoError(t, err) 9573 9574 // Ensure we see both tokens 9575 out := gatherTokens(iter) 9576 require.Len(t, out, 2) 9577 9578 got := []string{} 9579 for _, t := range out { 9580 got = append(got, t.AccessorID[:4]) 9581 } 9582 expect := []string{"aaaa", "aabb"} 9583 require.Equal(t, expect, got) 9584 }) 9585 9586 t.Run("reverse order", func(t *testing.T) { 9587 iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa", SortReverse) 9588 require.NoError(t, err) 9589 9590 // Ensure we see both tokens 9591 out := gatherTokens(iter) 9592 require.Len(t, out, 2) 9593 9594 got := []string{} 9595 for _, t := range out { 9596 got = append(got, t.AccessorID[:4]) 9597 } 9598 expect := []string{"aabb", "aaaa"} 9599 require.Equal(t, expect, got) 9600 }) 9601 } 9602 9603 func TestStateStore_ACLTokensByGlobal(t *testing.T) { 9604 ci.Parallel(t) 9605 9606 state := testStateStore(t) 9607 tk1 := mock.ACLToken() 9608 tk1.AccessorID = "aaaa" + tk1.AccessorID[4:] 9609 9610 tk2 := mock.ACLToken() 9611 tk2.AccessorID = "aabb" + tk2.AccessorID[4:] 9612 9613 tk3 := mock.ACLToken() 9614 tk3.AccessorID = "bbbb" + tk3.AccessorID[4:] 9615 tk3.Global = true 9616 9617 tk4 := mock.ACLToken() 9618 tk4.AccessorID = "ffff" + tk4.AccessorID[4:] 9619 
9620 err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2, tk3, tk4}) 9621 require.NoError(t, err) 9622 9623 gatherTokens := func(iter memdb.ResultIterator) []*structs.ACLToken { 9624 var tokens []*structs.ACLToken 9625 for { 9626 raw := iter.Next() 9627 if raw == nil { 9628 break 9629 } 9630 tokens = append(tokens, raw.(*structs.ACLToken)) 9631 } 9632 return tokens 9633 } 9634 9635 t.Run("only global tokens", func(t *testing.T) { 9636 iter, err := state.ACLTokensByGlobal(nil, true, SortDefault) 9637 require.NoError(t, err) 9638 9639 got := gatherTokens(iter) 9640 require.Len(t, got, 1) 9641 require.Equal(t, tk3.AccessorID, got[0].AccessorID) 9642 }) 9643 9644 t.Run("reverse order", func(t *testing.T) { 9645 iter, err := state.ACLTokensByGlobal(nil, false, SortReverse) 9646 require.NoError(t, err) 9647 9648 expected := []*structs.ACLToken{tk4, tk2, tk1} 9649 got := gatherTokens(iter) 9650 require.Len(t, got, 3) 9651 require.Equal(t, expected, got) 9652 }) 9653 } 9654 9655 func TestStateStore_OneTimeTokens(t *testing.T) { 9656 ci.Parallel(t) 9657 index := uint64(100) 9658 state := testStateStore(t) 9659 9660 // create some ACL tokens 9661 9662 token1 := mock.ACLToken() 9663 token2 := mock.ACLToken() 9664 token3 := mock.ACLToken() 9665 index++ 9666 require.Nil(t, state.UpsertACLTokens( 9667 structs.MsgTypeTestSetup, index, 9668 []*structs.ACLToken{token1, token2, token3})) 9669 9670 otts := []*structs.OneTimeToken{ 9671 { 9672 // expired OTT for token1 9673 OneTimeSecretID: uuid.Generate(), 9674 AccessorID: token1.AccessorID, 9675 ExpiresAt: time.Now().Add(-1 * time.Minute), 9676 }, 9677 { 9678 // valid OTT for token2 9679 OneTimeSecretID: uuid.Generate(), 9680 AccessorID: token2.AccessorID, 9681 ExpiresAt: time.Now().Add(10 * time.Minute), 9682 }, 9683 { 9684 // new but expired OTT for token2; this will be accepted even 9685 // though it's expired and overwrite the other one 9686 OneTimeSecretID: uuid.Generate(), 9687 AccessorID: 
token2.AccessorID, 9688 ExpiresAt: time.Now().Add(-10 * time.Minute), 9689 }, 9690 { 9691 // valid OTT for token3 9692 AccessorID: token3.AccessorID, 9693 OneTimeSecretID: uuid.Generate(), 9694 ExpiresAt: time.Now().Add(10 * time.Minute), 9695 }, 9696 { 9697 // new valid OTT for token3 9698 OneTimeSecretID: uuid.Generate(), 9699 AccessorID: token3.AccessorID, 9700 ExpiresAt: time.Now().Add(5 * time.Minute), 9701 }, 9702 } 9703 9704 for _, ott := range otts { 9705 index++ 9706 require.NoError(t, state.UpsertOneTimeToken(structs.MsgTypeTestSetup, index, ott)) 9707 } 9708 9709 // verify that we have exactly one OTT for each AccessorID 9710 9711 txn := state.db.ReadTxn() 9712 iter, err := txn.Get("one_time_token", "id") 9713 require.NoError(t, err) 9714 results := []*structs.OneTimeToken{} 9715 for { 9716 raw := iter.Next() 9717 if raw == nil { 9718 break 9719 } 9720 ott, ok := raw.(*structs.OneTimeToken) 9721 require.True(t, ok) 9722 results = append(results, ott) 9723 } 9724 9725 // results aren't ordered but if we have 3 OTT and all 3 tokens, we know 9726 // we have no duplicate accessors 9727 require.Len(t, results, 3) 9728 accessors := []string{ 9729 results[0].AccessorID, results[1].AccessorID, results[2].AccessorID} 9730 require.Contains(t, accessors, token1.AccessorID) 9731 require.Contains(t, accessors, token2.AccessorID) 9732 require.Contains(t, accessors, token3.AccessorID) 9733 9734 // now verify expiration 9735 9736 getExpiredTokens := func(now time.Time) []*structs.OneTimeToken { 9737 txn := state.db.ReadTxn() 9738 iter, err := state.oneTimeTokensExpiredTxn(txn, nil, now) 9739 require.NoError(t, err) 9740 9741 results := []*structs.OneTimeToken{} 9742 for { 9743 raw := iter.Next() 9744 if raw == nil { 9745 break 9746 } 9747 ott, ok := raw.(*structs.OneTimeToken) 9748 require.True(t, ok) 9749 results = append(results, ott) 9750 } 9751 return results 9752 } 9753 9754 results = getExpiredTokens(time.Now()) 9755 require.Len(t, results, 2) 9756 9757 // results 
aren't ordered
	expiredAccessors := []string{results[0].AccessorID, results[1].AccessorID}
	require.Contains(t, expiredAccessors, token1.AccessorID)
	require.Contains(t, expiredAccessors, token2.AccessorID)
	require.True(t, time.Now().After(results[0].ExpiresAt))
	require.True(t, time.Now().After(results[1].ExpiresAt))

	// clear the expired tokens and verify they're gone
	index++
	require.NoError(t, state.ExpireOneTimeTokens(
		structs.MsgTypeTestSetup, index, time.Now()))

	results = getExpiredTokens(time.Now())
	require.Len(t, results, 0)

	// query the unexpired token
	ott, err := state.OneTimeTokenBySecret(nil, otts[len(otts)-1].OneTimeSecretID)
	require.NoError(t, err)
	require.Equal(t, token3.AccessorID, ott.AccessorID)
	require.True(t, time.Now().Before(ott.ExpiresAt))

	// round-trip the token through a snapshot restore
	restore, err := state.Restore()
	require.NoError(t, err)
	err = restore.OneTimeTokenRestore(ott)
	require.NoError(t, err)
	require.NoError(t, restore.Commit())

	ott, err = state.OneTimeTokenBySecret(nil, otts[len(otts)-1].OneTimeSecretID)
	require.NoError(t, err)
	require.Equal(t, token3.AccessorID, ott.AccessorID)
}

// TestStateStore_ClusterMetadata verifies that cluster metadata written with
// ClusterSetMetadata is returned unchanged by ClusterMetadata.
func TestStateStore_ClusterMetadata(t *testing.T) {
	// Added for consistency with every sibling test in this file; the test
	// uses its own state store instance, so running in parallel is safe.
	ci.Parallel(t)
	require := require.New(t)

	state := testStateStore(t)
	clusterID := "12345678-1234-1234-1234-1234567890"
	now := time.Now().UnixNano()
	meta := &structs.ClusterMetadata{ClusterID: clusterID, CreateTime: now}

	err := state.ClusterSetMetadata(100, meta)
	require.NoError(err)

	result, err := state.ClusterMetadata(nil)
	require.NoError(err)
	require.Equal(clusterID, result.ClusterID)
	require.Equal(now, result.CreateTime)
}

// TestStateStore_UpsertScalingPolicy verifies inserting scaling policies
// fires watches registered on the (previously missing) targets and that
// policies are keyed by target and type.
func TestStateStore_UpsertScalingPolicy(t *testing.T) {
	ci.Parallel(t)
	require := require.New(t)

	state := testStateStore(t)
	policy := mock.ScalingPolicy()
	policy2 :=
mock.ScalingPolicy() 9813 9814 wsAll := memdb.NewWatchSet() 9815 all, err := state.ScalingPolicies(wsAll) 9816 require.NoError(err) 9817 require.Nil(all.Next()) 9818 9819 ws := memdb.NewWatchSet() 9820 out, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) 9821 require.NoError(err) 9822 require.Nil(out) 9823 9824 out, err = state.ScalingPolicyByTargetAndType(ws, policy2.Target, policy2.Type) 9825 require.NoError(err) 9826 require.Nil(out) 9827 9828 err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2}) 9829 require.NoError(err) 9830 require.True(watchFired(ws)) 9831 require.True(watchFired(wsAll)) 9832 9833 ws = memdb.NewWatchSet() 9834 out, err = state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) 9835 require.NoError(err) 9836 require.Equal(policy, out) 9837 9838 out, err = state.ScalingPolicyByTargetAndType(ws, policy2.Target, policy2.Type) 9839 require.NoError(err) 9840 require.Equal(policy2, out) 9841 9842 // Ensure we see both policies 9843 countPolicies := func() (n int, err error) { 9844 iter, err := state.ScalingPolicies(ws) 9845 if err != nil { 9846 return 9847 } 9848 9849 for raw := iter.Next(); raw != nil; raw = iter.Next() { 9850 n++ 9851 } 9852 return 9853 } 9854 9855 count, err := countPolicies() 9856 require.NoError(err) 9857 require.Equal(2, count) 9858 9859 index, err := state.Index("scaling_policy") 9860 require.NoError(err) 9861 require.True(1000 == index) 9862 require.False(watchFired(ws)) 9863 9864 // Check that we can add policy with same target but different type 9865 policy3 := mock.ScalingPolicy() 9866 for k, v := range policy2.Target { 9867 policy3.Target[k] = v 9868 } 9869 9870 err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy3}) 9871 require.NoError(err) 9872 9873 // Ensure we see both policies, since target didn't change 9874 count, err = countPolicies() 9875 require.NoError(err) 9876 require.Equal(2, count) 9877 9878 // Change type and check if we 
see 3 9879 policy3.Type = "other-type" 9880 9881 err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy3}) 9882 require.NoError(err) 9883 9884 count, err = countPolicies() 9885 require.NoError(err) 9886 require.Equal(3, count) 9887 } 9888 9889 func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { 9890 ci.Parallel(t) 9891 require := require.New(t) 9892 9893 otherNamespace := "not-default-namespace" 9894 state := testStateStore(t) 9895 policy := mock.ScalingPolicy() 9896 policy2 := mock.ScalingPolicy() 9897 policy2.Target[structs.ScalingTargetNamespace] = otherNamespace 9898 9899 ws1 := memdb.NewWatchSet() 9900 iter, err := state.ScalingPoliciesByNamespace(ws1, structs.DefaultNamespace, "") 9901 require.NoError(err) 9902 require.Nil(iter.Next()) 9903 9904 ws2 := memdb.NewWatchSet() 9905 iter, err = state.ScalingPoliciesByNamespace(ws2, otherNamespace, "") 9906 require.NoError(err) 9907 require.Nil(iter.Next()) 9908 9909 err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2}) 9910 require.NoError(err) 9911 require.True(watchFired(ws1)) 9912 require.True(watchFired(ws2)) 9913 9914 iter, err = state.ScalingPoliciesByNamespace(nil, structs.DefaultNamespace, "") 9915 require.NoError(err) 9916 policiesInDefaultNamespace := []string{} 9917 for { 9918 raw := iter.Next() 9919 if raw == nil { 9920 break 9921 } 9922 policiesInDefaultNamespace = append(policiesInDefaultNamespace, raw.(*structs.ScalingPolicy).ID) 9923 } 9924 require.ElementsMatch([]string{policy.ID}, policiesInDefaultNamespace) 9925 9926 iter, err = state.ScalingPoliciesByNamespace(nil, otherNamespace, "") 9927 require.NoError(err) 9928 policiesInOtherNamespace := []string{} 9929 for { 9930 raw := iter.Next() 9931 if raw == nil { 9932 break 9933 } 9934 policiesInOtherNamespace = append(policiesInOtherNamespace, raw.(*structs.ScalingPolicy).ID) 9935 } 9936 require.ElementsMatch([]string{policy2.ID}, policiesInOtherNamespace) 9937 } 9938 9939 func 
TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { 9940 ci.Parallel(t) 9941 require := require.New(t) 9942 9943 ns1 := "name" 9944 ns2 := "name2" // matches prefix "name" 9945 state := testStateStore(t) 9946 policy1 := mock.ScalingPolicy() 9947 policy1.Target[structs.ScalingTargetNamespace] = ns1 9948 policy2 := mock.ScalingPolicy() 9949 policy2.Target[structs.ScalingTargetNamespace] = ns2 9950 9951 ws1 := memdb.NewWatchSet() 9952 iter, err := state.ScalingPoliciesByNamespace(ws1, ns1, "") 9953 require.NoError(err) 9954 require.Nil(iter.Next()) 9955 9956 ws2 := memdb.NewWatchSet() 9957 iter, err = state.ScalingPoliciesByNamespace(ws2, ns2, "") 9958 require.NoError(err) 9959 require.Nil(iter.Next()) 9960 9961 err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy1, policy2}) 9962 require.NoError(err) 9963 require.True(watchFired(ws1)) 9964 require.True(watchFired(ws2)) 9965 9966 iter, err = state.ScalingPoliciesByNamespace(nil, ns1, "") 9967 require.NoError(err) 9968 policiesInNS1 := []string{} 9969 for { 9970 raw := iter.Next() 9971 if raw == nil { 9972 break 9973 } 9974 policiesInNS1 = append(policiesInNS1, raw.(*structs.ScalingPolicy).ID) 9975 } 9976 require.ElementsMatch([]string{policy1.ID}, policiesInNS1) 9977 9978 iter, err = state.ScalingPoliciesByNamespace(nil, ns2, "") 9979 require.NoError(err) 9980 policiesInNS2 := []string{} 9981 for { 9982 raw := iter.Next() 9983 if raw == nil { 9984 break 9985 } 9986 policiesInNS2 = append(policiesInNS2, raw.(*structs.ScalingPolicy).ID) 9987 } 9988 require.ElementsMatch([]string{policy2.ID}, policiesInNS2) 9989 } 9990 9991 // Scaling Policy IDs are generated randomly during Job.Register 9992 // Subsequent updates of the job should preserve the ID for the scaling policy 9993 // associated with a given target. 
9994 func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { 9995 ci.Parallel(t) 9996 9997 require := require.New(t) 9998 9999 state := testStateStore(t) 10000 job, policy := mock.JobWithScalingPolicy() 10001 10002 var newIndex uint64 = 1000 10003 err := state.UpsertJob(structs.MsgTypeTestSetup, newIndex, nil, job) 10004 require.NoError(err) 10005 10006 ws := memdb.NewWatchSet() 10007 p1, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) 10008 require.NoError(err) 10009 require.NotNil(p1) 10010 require.Equal(newIndex, p1.CreateIndex) 10011 require.Equal(newIndex, p1.ModifyIndex) 10012 10013 index, err := state.Index("scaling_policy") 10014 require.NoError(err) 10015 require.Equal(newIndex, index) 10016 require.NotEmpty(p1.ID) 10017 10018 // update the job 10019 job.Meta["new-meta"] = "new-value" 10020 newIndex += 100 10021 err = state.UpsertJob(structs.MsgTypeTestSetup, newIndex, nil, job) 10022 require.NoError(err) 10023 require.False(watchFired(ws), "watch should not have fired") 10024 10025 p2, err := state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type) 10026 require.NoError(err) 10027 require.NotNil(p2) 10028 require.Equal(p1.ID, p2.ID, "ID should not have changed") 10029 require.Equal(p1.CreateIndex, p2.CreateIndex) 10030 require.Equal(p1.ModifyIndex, p2.ModifyIndex) 10031 10032 index, err = state.Index("scaling_policy") 10033 require.NoError(err) 10034 require.Equal(index, p1.CreateIndex, "table index should not have changed") 10035 } 10036 10037 // Updating the scaling policy for a job should update the index table and fire the watch. 
10038 // This test is the converse of TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex 10039 func TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { 10040 ci.Parallel(t) 10041 10042 require := require.New(t) 10043 10044 state := testStateStore(t) 10045 job, policy := mock.JobWithScalingPolicy() 10046 10047 var oldIndex uint64 = 1000 10048 require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, oldIndex, nil, job)) 10049 10050 ws := memdb.NewWatchSet() 10051 p1, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type) 10052 require.NoError(err) 10053 require.NotNil(p1) 10054 require.Equal(oldIndex, p1.CreateIndex) 10055 require.Equal(oldIndex, p1.ModifyIndex) 10056 prevId := p1.ID 10057 10058 index, err := state.Index("scaling_policy") 10059 require.NoError(err) 10060 require.Equal(oldIndex, index) 10061 require.NotEmpty(p1.ID) 10062 10063 // update the job with the updated scaling policy; make sure to use a different object 10064 newPolicy := p1.Copy() 10065 newPolicy.Policy["new-field"] = "new-value" 10066 job.TaskGroups[0].Scaling = newPolicy 10067 require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, oldIndex+100, nil, job)) 10068 require.True(watchFired(ws), "watch should have fired") 10069 10070 p2, err := state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type) 10071 require.NoError(err) 10072 require.NotNil(p2) 10073 require.Equal(p2.Policy["new-field"], "new-value") 10074 require.Equal(prevId, p2.ID, "ID should not have changed") 10075 require.Equal(oldIndex, p2.CreateIndex) 10076 require.Greater(p2.ModifyIndex, oldIndex, "ModifyIndex should have advanced") 10077 10078 index, err = state.Index("scaling_policy") 10079 require.NoError(err) 10080 require.Greater(index, oldIndex, "table index should have advanced") 10081 } 10082 10083 func TestStateStore_DeleteScalingPolicies(t *testing.T) { 10084 ci.Parallel(t) 10085 10086 require := require.New(t) 10087 10088 state := testStateStore(t) 10089 policy := 
mock.ScalingPolicy()
	policy2 := mock.ScalingPolicy()

	// Create the policy
	err := state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2})
	require.NoError(err)

	// Create a watcher
	ws := memdb.NewWatchSet()
	_, err = state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type)
	require.NoError(err)

	// Delete the policy
	err = state.DeleteScalingPolicies(1001, []string{policy.ID, policy2.ID})
	require.NoError(err)

	// Ensure watching triggered
	require.True(watchFired(ws))

	// Ensure we don't get the objects back
	ws = memdb.NewWatchSet()
	out, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type)
	require.NoError(err)
	require.Nil(out)

	ws = memdb.NewWatchSet()
	out, err = state.ScalingPolicyByTargetAndType(ws, policy2.Target, policy2.Type)
	require.NoError(err)
	require.Nil(out)

	// Ensure the namespace listing no longer returns any policies
	iter, err := state.ScalingPoliciesByNamespace(ws, policy.Target[structs.ScalingTargetNamespace], "")
	require.NoError(err)
	count := 0
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		count++
	}
	require.Equal(0, count)

	index, err := state.Index("scaling_policy")
	require.NoError(err)
	require.EqualValues(1001, index)
	require.False(watchFired(ws))
}

// TestStateStore_StopJob_DeleteScalingPolicies asserts that stopping a job
// deletes its scaling policies, fires the relevant watches, and advances the
// scaling_policy table index.
func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	state := testStateStore(t)

	job := mock.Job()

	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
	require.NoError(err)

	policy := mock.ScalingPolicy()
	policy.Target[structs.ScalingTargetJob] = job.ID
	err = state.UpsertScalingPolicies(1100, []*structs.ScalingPolicy{policy})
	require.NoError(err)

	// Ensure the scaling policy is present and start some watches
	wsGet := memdb.NewWatchSet()
	out, err := state.ScalingPolicyByTargetAndType(wsGet, policy.Target, policy.Type)
	require.NoError(err)
	require.NotNil(out)
	wsList := memdb.NewWatchSet()
	_, err = state.ScalingPolicies(wsList)
	require.NoError(err)

	// Stop the job
	job, err = state.JobByID(nil, job.Namespace, job.ID)
	require.NoError(err)
	job.Stop = true
	err = state.UpsertJob(structs.MsgTypeTestSetup, 1200, nil, job)
	require.NoError(err)

	// Ensure:
	// * the scaling policy was deleted
	// * the watches were fired
	// * the table index was advanced
	require.True(watchFired(wsGet))
	require.True(watchFired(wsList))
	out, err = state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type)
	require.NoError(err)
	require.Nil(out)
	index, err := state.Index("scaling_policy")
	require.NoError(err)
	require.GreaterOrEqual(index, uint64(1200))
}

// TestStateStore_UnstopJob_UpsertScalingPolicies asserts that re-registering
// a stopped job with an unchanged scaling policy leaves the policy table
// untouched (no watch fire, no index bump).
func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	state := testStateStore(t)

	job, policy := mock.JobWithScalingPolicy()
	job.Stop = true

	// establish watcher, verify there are no scaling policies yet
	ws := memdb.NewWatchSet()
	list, err := state.ScalingPolicies(ws)
	require.NoError(err)
	require.Nil(list.Next())

	// upsert a stopped job; the watcher fires and the scaling policy is
	// created even though the job is stopped
	err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
	require.NoError(err)
	require.True(watchFired(ws))
	list, err = state.ScalingPolicies(ws)
	require.NoError(err)
	require.NotNil(list.Next())

	// Establish a new watchset
	ws = memdb.NewWatchSet()
	_, err = state.ScalingPolicies(ws)
	require.NoError(err)

	// Unstop this job, say you'll run it again...
	job.Stop = false
	err = state.UpsertJob(structs.MsgTypeTestSetup, 1100, nil, job)
	require.NoError(err)

	// Ensure the scaling policy still exists, watch was not fired, index was not advanced
	out, err := state.ScalingPolicyByTargetAndType(nil, policy.Target, policy.Type)
	require.NoError(err)
	require.NotNil(out)
	index, err := state.Index("scaling_policy")
	require.NoError(err)
	require.EqualValues(1000, index)
	require.False(watchFired(ws))
}

// TestStateStore_DeleteJob_DeleteScalingPolicies asserts that deleting a job
// also deletes its scaling policies and advances the table index.
func TestStateStore_DeleteJob_DeleteScalingPolicies(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	state := testStateStore(t)

	job := mock.Job()

	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
	require.NoError(err)

	policy := mock.ScalingPolicy()
	policy.Target[structs.ScalingTargetJob] = job.ID
	err = state.UpsertScalingPolicies(1001, []*structs.ScalingPolicy{policy})
	require.NoError(err)

	// Delete the job
	err = state.DeleteJob(1002, job.Namespace, job.ID)
	require.NoError(err)

	// Ensure the scaling policy was deleted
	ws := memdb.NewWatchSet()
	out, err := state.ScalingPolicyByTargetAndType(ws, policy.Target, policy.Type)
	require.NoError(err)
	require.Nil(out)
	index, err := state.Index("scaling_policy")
	require.NoError(err)
	require.Greater(index, uint64(1001))
}

// TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug guards against a
// regression where deleting a job removed the scaling policies of every job
// whose ID shared the deleted job's ID as a prefix.
func TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	state := testStateStore(t)

	job := mock.Job()
	require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job))
	job2 := job.Copy()
	job2.ID = job.ID + "-but-longer"
	require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, job2))

	policy := mock.ScalingPolicy()
	policy.Target[structs.ScalingTargetJob] = job.ID
	policy2 := mock.ScalingPolicy()
	policy2.Target[structs.ScalingTargetJob] = job2.ID
	require.NoError(state.UpsertScalingPolicies(1002, []*structs.ScalingPolicy{policy, policy2}))

	// Delete job with the shorter prefix-ID
	require.NoError(state.DeleteJob(1003, job.Namespace, job.ID))

	// Ensure only the associated scaling policy was deleted, not the one matching the job with the longer ID
	out, err := state.ScalingPolicyByID(nil, policy.ID)
	require.NoError(err)
	require.Nil(out)
	out, err = state.ScalingPolicyByID(nil, policy2.ID)
	require.NoError(err)
	require.NotNil(out)
}

// This test ensures that deleting a job that doesn't have any scaling policies
// will not cause the scaling_policy table index to increase, on either job
// registration or deletion.
func TestStateStore_DeleteJob_ScalingPolicyIndexNoop(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	state := testStateStore(t)

	job := mock.Job()

	prevIndex, err := state.Index("scaling_policy")
	require.NoError(err)

	err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
	require.NoError(err)

	newIndex, err := state.Index("scaling_policy")
	require.NoError(err)
	require.Equal(prevIndex, newIndex)

	// Delete the job
	err = state.DeleteJob(1002, job.Namespace, job.ID)
	require.NoError(err)

	newIndex, err = state.Index("scaling_policy")
	require.NoError(err)
	require.Equal(prevIndex, newIndex)
}

// TestStateStore_ScalingPoliciesByType asserts exact-type and prefix lookups
// of scaling policies via ScalingPoliciesByTypePrefix.
func TestStateStore_ScalingPoliciesByType(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	state := testStateStore(t)

	// Create scaling policies of different types
	pHorzA :=
mock.ScalingPolicy() 10330 pHorzA.Type = structs.ScalingPolicyTypeHorizontal 10331 pHorzB := mock.ScalingPolicy() 10332 pHorzB.Type = structs.ScalingPolicyTypeHorizontal 10333 10334 pOther1 := mock.ScalingPolicy() 10335 pOther1.Type = "other-type-1" 10336 10337 pOther2 := mock.ScalingPolicy() 10338 pOther2.Type = "other-type-2" 10339 10340 // Create search routine 10341 search := func(t string) (found []string) { 10342 found = []string{} 10343 iter, err := state.ScalingPoliciesByTypePrefix(nil, t) 10344 require.NoError(err) 10345 10346 for raw := iter.Next(); raw != nil; raw = iter.Next() { 10347 found = append(found, raw.(*structs.ScalingPolicy).Type) 10348 } 10349 return 10350 } 10351 10352 // Create the policies 10353 var baseIndex uint64 = 1000 10354 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{pHorzA, pHorzB, pOther1, pOther2}) 10355 require.NoError(err) 10356 10357 // Check if we can read horizontal policies 10358 expect := []string{pHorzA.Type, pHorzB.Type} 10359 actual := search(structs.ScalingPolicyTypeHorizontal) 10360 require.ElementsMatch(expect, actual) 10361 10362 // Check if we can read policies of other types 10363 expect = []string{pOther1.Type} 10364 actual = search("other-type-1") 10365 require.ElementsMatch(expect, actual) 10366 10367 // Check that we can read policies by prefix 10368 expect = []string{"other-type-1", "other-type-2"} 10369 actual = search("other-type") 10370 require.Equal(expect, actual) 10371 10372 // Check for empty result 10373 expect = []string{} 10374 actual = search("non-existing") 10375 require.ElementsMatch(expect, actual) 10376 } 10377 10378 func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { 10379 ci.Parallel(t) 10380 10381 require := require.New(t) 10382 10383 state := testStateStore(t) 10384 10385 // Create scaling policies of different types 10386 pHorzA := mock.ScalingPolicy() 10387 pHorzA.Type = structs.ScalingPolicyTypeHorizontal 10388 pHorzB := mock.ScalingPolicy() 10389 
pHorzB.Type = structs.ScalingPolicyTypeHorizontal 10390 10391 pOther1 := mock.ScalingPolicy() 10392 pOther1.Type = "other-type-1" 10393 10394 pOther2 := mock.ScalingPolicy() 10395 pOther2.Type = "other-type-2" 10396 10397 // Create search routine 10398 search := func(t string) (count int, found []string, err error) { 10399 found = []string{} 10400 iter, err := state.ScalingPoliciesByTypePrefix(nil, t) 10401 if err != nil { 10402 return 10403 } 10404 10405 for raw := iter.Next(); raw != nil; raw = iter.Next() { 10406 count++ 10407 found = append(found, raw.(*structs.ScalingPolicy).Type) 10408 } 10409 return 10410 } 10411 10412 // Create the policies 10413 var baseIndex uint64 = 1000 10414 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{pHorzA, pHorzB, pOther1, pOther2}) 10415 require.NoError(err) 10416 10417 // Check if we can read horizontal policies 10418 expect := []string{pHorzA.Type, pHorzB.Type} 10419 count, found, err := search("h") 10420 10421 sort.Strings(found) 10422 sort.Strings(expect) 10423 10424 require.NoError(err) 10425 require.Equal(expect, found) 10426 require.Equal(2, count) 10427 10428 // Check if we can read other prefix policies 10429 expect = []string{pOther1.Type, pOther2.Type} 10430 count, found, err = search("other") 10431 10432 sort.Strings(found) 10433 sort.Strings(expect) 10434 10435 require.NoError(err) 10436 require.Equal(expect, found) 10437 require.Equal(2, count) 10438 10439 // Check for empty result 10440 expect = []string{} 10441 count, found, err = search("non-existing") 10442 10443 sort.Strings(found) 10444 sort.Strings(expect) 10445 10446 require.NoError(err) 10447 require.Equal(expect, found) 10448 require.Equal(0, count) 10449 } 10450 10451 func TestStateStore_ScalingPoliciesByJob(t *testing.T) { 10452 ci.Parallel(t) 10453 10454 require := require.New(t) 10455 10456 state := testStateStore(t) 10457 policyA := mock.ScalingPolicy() 10458 policyB1 := mock.ScalingPolicy() 10459 policyB2 := 
mock.ScalingPolicy() 10460 policyB1.Target[structs.ScalingTargetJob] = policyB2.Target[structs.ScalingTargetJob] 10461 10462 // Create the policies 10463 var baseIndex uint64 = 1000 10464 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{policyA, policyB1, policyB2}) 10465 require.NoError(err) 10466 10467 iter, err := state.ScalingPoliciesByJob(nil, 10468 policyA.Target[structs.ScalingTargetNamespace], 10469 policyA.Target[structs.ScalingTargetJob], "") 10470 require.NoError(err) 10471 10472 // Ensure we see expected policies 10473 count := 0 10474 found := []string{} 10475 for { 10476 raw := iter.Next() 10477 if raw == nil { 10478 break 10479 } 10480 count++ 10481 found = append(found, raw.(*structs.ScalingPolicy).Target[structs.ScalingTargetGroup]) 10482 } 10483 require.Equal(1, count) 10484 sort.Strings(found) 10485 expect := []string{policyA.Target[structs.ScalingTargetGroup]} 10486 sort.Strings(expect) 10487 require.Equal(expect, found) 10488 10489 iter, err = state.ScalingPoliciesByJob(nil, 10490 policyB1.Target[structs.ScalingTargetNamespace], 10491 policyB1.Target[structs.ScalingTargetJob], "") 10492 require.NoError(err) 10493 10494 // Ensure we see expected policies 10495 count = 0 10496 found = []string{} 10497 for { 10498 raw := iter.Next() 10499 if raw == nil { 10500 break 10501 } 10502 count++ 10503 found = append(found, raw.(*structs.ScalingPolicy).Target[structs.ScalingTargetGroup]) 10504 } 10505 require.Equal(2, count) 10506 sort.Strings(found) 10507 expect = []string{ 10508 policyB1.Target[structs.ScalingTargetGroup], 10509 policyB2.Target[structs.ScalingTargetGroup], 10510 } 10511 sort.Strings(expect) 10512 require.Equal(expect, found) 10513 } 10514 10515 func TestStateStore_ScalingPoliciesByJob_PrefixBug(t *testing.T) { 10516 ci.Parallel(t) 10517 10518 require := require.New(t) 10519 10520 jobPrefix := "job-name-" + uuid.Generate() 10521 10522 state := testStateStore(t) 10523 policy1 := mock.ScalingPolicy() 10524 
policy1.Target[structs.ScalingTargetJob] = jobPrefix 10525 policy2 := mock.ScalingPolicy() 10526 policy2.Target[structs.ScalingTargetJob] = jobPrefix + "-more" 10527 10528 // Create the policies 10529 var baseIndex uint64 = 1000 10530 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{policy1, policy2}) 10531 require.NoError(err) 10532 10533 iter, err := state.ScalingPoliciesByJob(nil, 10534 policy1.Target[structs.ScalingTargetNamespace], 10535 jobPrefix, "") 10536 require.NoError(err) 10537 10538 // Ensure we see expected policies 10539 count := 0 10540 found := []string{} 10541 for { 10542 raw := iter.Next() 10543 if raw == nil { 10544 break 10545 } 10546 count++ 10547 found = append(found, raw.(*structs.ScalingPolicy).ID) 10548 } 10549 require.Equal(1, count) 10550 expect := []string{policy1.ID} 10551 require.Equal(expect, found) 10552 } 10553 10554 func TestStateStore_ScalingPolicyByTargetAndType(t *testing.T) { 10555 ci.Parallel(t) 10556 10557 require := require.New(t) 10558 10559 state := testStateStore(t) 10560 10561 // Create scaling policies 10562 policyA := mock.ScalingPolicy() 10563 // Same target, different type 10564 policyB := mock.ScalingPolicy() 10565 policyC := mock.ScalingPolicy() 10566 for k, v := range policyB.Target { 10567 policyC.Target[k] = v 10568 } 10569 policyC.Type = "other-type" 10570 10571 // Create the policies 10572 var baseIndex uint64 = 1000 10573 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{policyA, policyB, policyC}) 10574 require.NoError(err) 10575 10576 // Check if we can retrieve the right policies 10577 found, err := state.ScalingPolicyByTargetAndType(nil, policyA.Target, policyA.Type) 10578 require.NoError(err) 10579 require.Equal(policyA, found) 10580 10581 // Check for wrong type 10582 found, err = state.ScalingPolicyByTargetAndType(nil, policyA.Target, "wrong_type") 10583 require.NoError(err) 10584 require.Nil(found) 10585 10586 // Check for same target but different type 
10587 found, err = state.ScalingPolicyByTargetAndType(nil, policyB.Target, policyB.Type) 10588 require.NoError(err) 10589 require.Equal(policyB, found) 10590 10591 found, err = state.ScalingPolicyByTargetAndType(nil, policyB.Target, policyC.Type) 10592 require.NoError(err) 10593 require.Equal(policyC, found) 10594 } 10595 10596 func TestStateStore_UpsertScalingEvent(t *testing.T) { 10597 ci.Parallel(t) 10598 require := require.New(t) 10599 10600 state := testStateStore(t) 10601 job := mock.Job() 10602 groupName := job.TaskGroups[0].Name 10603 10604 newEvent := structs.NewScalingEvent("message 1").SetMeta(map[string]interface{}{ 10605 "a": 1, 10606 }) 10607 10608 wsAll := memdb.NewWatchSet() 10609 all, err := state.ScalingEvents(wsAll) 10610 require.NoError(err) 10611 require.Nil(all.Next()) 10612 10613 ws := memdb.NewWatchSet() 10614 out, _, err := state.ScalingEventsByJob(ws, job.Namespace, job.ID) 10615 require.NoError(err) 10616 require.Nil(out) 10617 10618 err = state.UpsertScalingEvent(1000, &structs.ScalingEventRequest{ 10619 Namespace: job.Namespace, 10620 JobID: job.ID, 10621 TaskGroup: groupName, 10622 ScalingEvent: newEvent, 10623 }) 10624 require.NoError(err) 10625 require.True(watchFired(ws)) 10626 require.True(watchFired(wsAll)) 10627 10628 ws = memdb.NewWatchSet() 10629 out, eventsIndex, err := state.ScalingEventsByJob(ws, job.Namespace, job.ID) 10630 require.NoError(err) 10631 require.Equal(map[string][]*structs.ScalingEvent{ 10632 groupName: {newEvent}, 10633 }, out) 10634 require.EqualValues(eventsIndex, 1000) 10635 10636 iter, err := state.ScalingEvents(ws) 10637 require.NoError(err) 10638 10639 count := 0 10640 jobsReturned := []string{} 10641 var jobEvents *structs.JobScalingEvents 10642 for { 10643 raw := iter.Next() 10644 if raw == nil { 10645 break 10646 } 10647 jobEvents = raw.(*structs.JobScalingEvents) 10648 jobsReturned = append(jobsReturned, jobEvents.JobID) 10649 count++ 10650 } 10651 require.Equal(1, count) 10652 
require.EqualValues(jobEvents.ModifyIndex, 1000) 10653 require.EqualValues(jobEvents.ScalingEvents[groupName][0].CreateIndex, 1000) 10654 10655 index, err := state.Index("scaling_event") 10656 require.NoError(err) 10657 require.ElementsMatch([]string{job.ID}, jobsReturned) 10658 require.Equal(map[string][]*structs.ScalingEvent{ 10659 groupName: {newEvent}, 10660 }, jobEvents.ScalingEvents) 10661 require.EqualValues(1000, index) 10662 require.False(watchFired(ws)) 10663 } 10664 10665 func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { 10666 ci.Parallel(t) 10667 require := require.New(t) 10668 10669 state := testStateStore(t) 10670 namespace := uuid.Generate() 10671 jobID := uuid.Generate() 10672 group1 := uuid.Generate() 10673 group2 := uuid.Generate() 10674 10675 index := uint64(1000) 10676 for i := 1; i <= structs.JobTrackedScalingEvents+10; i++ { 10677 newEvent := structs.NewScalingEvent("").SetMeta(map[string]interface{}{ 10678 "i": i, 10679 "group": group1, 10680 }) 10681 err := state.UpsertScalingEvent(index, &structs.ScalingEventRequest{ 10682 Namespace: namespace, 10683 JobID: jobID, 10684 TaskGroup: group1, 10685 ScalingEvent: newEvent, 10686 }) 10687 index++ 10688 require.NoError(err) 10689 10690 newEvent = structs.NewScalingEvent("").SetMeta(map[string]interface{}{ 10691 "i": i, 10692 "group": group2, 10693 }) 10694 err = state.UpsertScalingEvent(index, &structs.ScalingEventRequest{ 10695 Namespace: namespace, 10696 JobID: jobID, 10697 TaskGroup: group2, 10698 ScalingEvent: newEvent, 10699 }) 10700 index++ 10701 require.NoError(err) 10702 } 10703 10704 out, _, err := state.ScalingEventsByJob(nil, namespace, jobID) 10705 require.NoError(err) 10706 require.Len(out, 2) 10707 10708 expectedEvents := []int{} 10709 for i := structs.JobTrackedScalingEvents; i > 0; i-- { 10710 expectedEvents = append(expectedEvents, i+10) 10711 } 10712 10713 // checking order and content 10714 require.Len(out[group1], structs.JobTrackedScalingEvents) 10715 
actualEvents := []int{} 10716 for _, event := range out[group1] { 10717 require.Equal(group1, event.Meta["group"]) 10718 actualEvents = append(actualEvents, event.Meta["i"].(int)) 10719 } 10720 require.Equal(expectedEvents, actualEvents) 10721 10722 // checking order and content 10723 require.Len(out[group2], structs.JobTrackedScalingEvents) 10724 actualEvents = []int{} 10725 for _, event := range out[group2] { 10726 require.Equal(group2, event.Meta["group"]) 10727 actualEvents = append(actualEvents, event.Meta["i"].(int)) 10728 } 10729 require.Equal(expectedEvents, actualEvents) 10730 } 10731 10732 func TestStateStore_RootKeyMetaData_CRUD(t *testing.T) { 10733 ci.Parallel(t) 10734 store := testStateStore(t) 10735 index, err := store.LatestIndex() 10736 require.NoError(t, err) 10737 10738 // create 3 default keys, one of which is active 10739 keyIDs := []string{} 10740 for i := 0; i < 3; i++ { 10741 key := structs.NewRootKeyMeta() 10742 keyIDs = append(keyIDs, key.KeyID) 10743 if i == 0 { 10744 key.SetActive() 10745 } 10746 index++ 10747 require.NoError(t, store.UpsertRootKeyMeta(index, key, false)) 10748 } 10749 10750 // retrieve the active key 10751 activeKey, err := store.GetActiveRootKeyMeta(nil) 10752 require.NoError(t, err) 10753 require.NotNil(t, activeKey) 10754 10755 // update an inactive key to active and verify the rotation 10756 inactiveKey, err := store.RootKeyMetaByID(nil, keyIDs[1]) 10757 require.NoError(t, err) 10758 require.NotNil(t, inactiveKey) 10759 oldCreateIndex := inactiveKey.CreateIndex 10760 newlyActiveKey := inactiveKey.Copy() 10761 newlyActiveKey.SetActive() 10762 index++ 10763 require.NoError(t, store.UpsertRootKeyMeta(index, newlyActiveKey, false)) 10764 10765 iter, err := store.RootKeyMetas(nil) 10766 require.NoError(t, err) 10767 for { 10768 raw := iter.Next() 10769 if raw == nil { 10770 break 10771 } 10772 key := raw.(*structs.RootKeyMeta) 10773 if key.KeyID == newlyActiveKey.KeyID { 10774 require.True(t, key.Active(), "expected 
updated key to be active") 10775 require.Equal(t, oldCreateIndex, key.CreateIndex) 10776 } else { 10777 require.False(t, key.Active(), "expected other keys to be inactive") 10778 } 10779 } 10780 10781 // delete the active key and verify it's been deleted 10782 index++ 10783 require.NoError(t, store.DeleteRootKeyMeta(index, keyIDs[1])) 10784 10785 iter, err = store.RootKeyMetas(nil) 10786 require.NoError(t, err) 10787 var found int 10788 for { 10789 raw := iter.Next() 10790 if raw == nil { 10791 break 10792 } 10793 key := raw.(*structs.RootKeyMeta) 10794 require.NotEqual(t, keyIDs[1], key.KeyID) 10795 require.False(t, key.Active(), "expected remaining keys to be inactive") 10796 found++ 10797 } 10798 require.Equal(t, 2, found, "expected only 2 keys remaining") 10799 } 10800 10801 func TestStateStore_Abandon(t *testing.T) { 10802 ci.Parallel(t) 10803 10804 s := testStateStore(t) 10805 abandonCh := s.AbandonCh() 10806 s.Abandon() 10807 select { 10808 case <-abandonCh: 10809 default: 10810 t.Fatalf("bad") 10811 } 10812 } 10813 10814 // Verifies that an error is returned when an allocation doesn't exist in the state store. 
10815 func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testing.T) { 10816 ci.Parallel(t) 10817 10818 state := testStateStore(t) 10819 alloc := mock.Alloc() 10820 require := require.New(t) 10821 10822 // Insert job 10823 err := state.UpsertJob(structs.MsgTypeTestSetup, 999, nil, alloc.Job) 10824 require.NoError(err) 10825 10826 allocDiffs := []*structs.AllocationDiff{ 10827 { 10828 ID: alloc.ID, 10829 }, 10830 } 10831 10832 snap, err := state.Snapshot() 10833 require.NoError(err) 10834 10835 denormalizedAllocs, err := snap.DenormalizeAllocationDiffSlice(allocDiffs) 10836 10837 require.EqualError(err, fmt.Sprintf("alloc %v doesn't exist", alloc.ID)) 10838 require.Nil(denormalizedAllocs) 10839 } 10840 10841 // TestStateStore_SnapshotMinIndex_OK asserts StateStore.SnapshotMinIndex blocks 10842 // until the StateStore's latest index is >= the requested index. 10843 func TestStateStore_SnapshotMinIndex_OK(t *testing.T) { 10844 ci.Parallel(t) 10845 10846 s := testStateStore(t) 10847 index, err := s.LatestIndex() 10848 require.NoError(t, err) 10849 10850 node := mock.Node() 10851 require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, index+1, node)) 10852 10853 // Assert SnapshotMinIndex returns immediately if index < latest index 10854 ctx, cancel := context.WithTimeout(context.Background(), 0) 10855 snap, err := s.SnapshotMinIndex(ctx, index) 10856 cancel() 10857 require.NoError(t, err) 10858 10859 snapIndex, err := snap.LatestIndex() 10860 require.NoError(t, err) 10861 if snapIndex <= index { 10862 require.Fail(t, "snapshot index should be greater than index") 10863 } 10864 10865 // Assert SnapshotMinIndex returns immediately if index == latest index 10866 ctx, cancel = context.WithTimeout(context.Background(), 0) 10867 snap, err = s.SnapshotMinIndex(ctx, index+1) 10868 cancel() 10869 require.NoError(t, err) 10870 10871 snapIndex, err = snap.LatestIndex() 10872 require.NoError(t, err) 10873 require.Equal(t, snapIndex, index+1) 10874 10875 // 
Assert SnapshotMinIndex blocks if index > latest index 10876 errCh := make(chan error, 1) 10877 ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) 10878 defer cancel() 10879 go func() { 10880 defer close(errCh) 10881 waitIndex := index + 2 10882 snap, err := s.SnapshotMinIndex(ctx, waitIndex) 10883 if err != nil { 10884 errCh <- err 10885 return 10886 } 10887 10888 snapIndex, err := snap.LatestIndex() 10889 if err != nil { 10890 errCh <- err 10891 return 10892 } 10893 10894 if snapIndex < waitIndex { 10895 errCh <- fmt.Errorf("snapshot index < wait index: %d < %d", snapIndex, waitIndex) 10896 return 10897 } 10898 }() 10899 10900 select { 10901 case err := <-errCh: 10902 require.NoError(t, err) 10903 case <-time.After(500 * time.Millisecond): 10904 // Let it block for a bit before unblocking by upserting 10905 } 10906 10907 node.Name = "hal" 10908 require.NoError(t, s.UpsertNode(structs.MsgTypeTestSetup, index+2, node)) 10909 10910 select { 10911 case err := <-errCh: 10912 require.NoError(t, err) 10913 case <-time.After(5 * time.Second): 10914 require.Fail(t, "timed out waiting for SnapshotMinIndex to unblock") 10915 } 10916 } 10917 10918 // TestStateStore_SnapshotMinIndex_Timeout asserts StateStore.SnapshotMinIndex 10919 // returns an error if the desired index is not reached within the deadline. 
10920 func TestStateStore_SnapshotMinIndex_Timeout(t *testing.T) { 10921 ci.Parallel(t) 10922 10923 s := testStateStore(t) 10924 index, err := s.LatestIndex() 10925 require.NoError(t, err) 10926 10927 // Assert SnapshotMinIndex blocks if index > latest index 10928 ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) 10929 defer cancel() 10930 snap, err := s.SnapshotMinIndex(ctx, index+1) 10931 require.EqualError(t, err, context.DeadlineExceeded.Error()) 10932 require.Nil(t, snap) 10933 } 10934 10935 // watchFired is a helper for unit tests that returns if the given watch set 10936 // fired (it doesn't care which watch actually fired). This uses a fixed 10937 // timeout since we already expect the event happened before calling this and 10938 // just need to distinguish a fire from a timeout. We do need a little time to 10939 // allow the watch to set up any goroutines, though. 10940 func watchFired(ws memdb.WatchSet) bool { 10941 timedOut := ws.Watch(time.After(50 * time.Millisecond)) 10942 return !timedOut 10943 } 10944 10945 // NodeIDSort is used to sort nodes by ID 10946 type NodeIDSort []*structs.Node 10947 10948 func (n NodeIDSort) Len() int { 10949 return len(n) 10950 } 10951 10952 func (n NodeIDSort) Less(i, j int) bool { 10953 return n[i].ID < n[j].ID 10954 } 10955 10956 func (n NodeIDSort) Swap(i, j int) { 10957 n[i], n[j] = n[j], n[i] 10958 } 10959 10960 // JobIDis used to sort jobs by id 10961 type JobIDSort []*structs.Job 10962 10963 func (n JobIDSort) Len() int { 10964 return len(n) 10965 } 10966 10967 func (n JobIDSort) Less(i, j int) bool { 10968 return n[i].ID < n[j].ID 10969 } 10970 10971 func (n JobIDSort) Swap(i, j int) { 10972 n[i], n[j] = n[j], n[i] 10973 } 10974 10975 // EvalIDis used to sort evals by id 10976 type EvalIDSort []*structs.Evaluation 10977 10978 func (n EvalIDSort) Len() int { 10979 return len(n) 10980 } 10981 10982 func (n EvalIDSort) Less(i, j int) bool { 10983 return n[i].ID < n[j].ID 10984 } 10985 
10986 func (n EvalIDSort) Swap(i, j int) { 10987 n[i], n[j] = n[j], n[i] 10988 } 10989 10990 // AllocIDsort used to sort allocations by id 10991 type AllocIDSort []*structs.Allocation 10992 10993 func (n AllocIDSort) Len() int { 10994 return len(n) 10995 } 10996 10997 func (n AllocIDSort) Less(i, j int) bool { 10998 return n[i].ID < n[j].ID 10999 } 11000 11001 func (n AllocIDSort) Swap(i, j int) { 11002 n[i], n[j] = n[j], n[i] 11003 } 11004 11005 // nextIndex gets the LatestIndex for this store and assumes no error 11006 // note: this helper is not safe for concurrent use 11007 func nextIndex(s *StateStore) uint64 { 11008 index, _ := s.LatestIndex() 11009 index++ 11010 return index 11011 }