github.com/iqoqo/nomad@v0.11.3-0.20200911112621-d7021c74d101/nomad/state/state_store_test.go (about) 1 package state 2 3 import ( 4 "context" 5 "fmt" 6 "reflect" 7 "sort" 8 "strings" 9 "testing" 10 "time" 11 12 "github.com/hashicorp/go-memdb" 13 "github.com/kr/pretty" 14 "github.com/stretchr/testify/assert" 15 "github.com/stretchr/testify/require" 16 17 "github.com/hashicorp/nomad/helper" 18 "github.com/hashicorp/nomad/helper/uuid" 19 "github.com/hashicorp/nomad/nomad/mock" 20 "github.com/hashicorp/nomad/nomad/structs" 21 ) 22 23 func testStateStore(t *testing.T) *StateStore { 24 return TestStateStore(t) 25 } 26 27 func TestStateStore_Blocking_Error(t *testing.T) { 28 t.Parallel() 29 30 expected := fmt.Errorf("test error") 31 errFn := func(memdb.WatchSet, *StateStore) (interface{}, uint64, error) { 32 return nil, 0, expected 33 } 34 35 state := testStateStore(t) 36 _, idx, err := state.BlockingQuery(errFn, 10, context.Background()) 37 assert.EqualError(t, err, expected.Error()) 38 assert.Zero(t, idx) 39 } 40 41 func TestStateStore_Blocking_Timeout(t *testing.T) { 42 t.Parallel() 43 44 noopFn := func(memdb.WatchSet, *StateStore) (interface{}, uint64, error) { 45 return nil, 5, nil 46 } 47 48 state := testStateStore(t) 49 timeout := time.Now().Add(250 * time.Millisecond) 50 deadlineCtx, cancel := context.WithDeadline(context.Background(), timeout) 51 defer cancel() 52 53 _, idx, err := state.BlockingQuery(noopFn, 10, deadlineCtx) 54 assert.EqualError(t, err, context.DeadlineExceeded.Error()) 55 assert.EqualValues(t, 5, idx) 56 assert.WithinDuration(t, timeout, time.Now(), 100*time.Millisecond) 57 } 58 59 func TestStateStore_Blocking_MinQuery(t *testing.T) { 60 t.Parallel() 61 62 node := mock.Node() 63 count := 0 64 queryFn := func(ws memdb.WatchSet, s *StateStore) (interface{}, uint64, error) { 65 _, err := s.NodeByID(ws, node.ID) 66 if err != nil { 67 return nil, 0, err 68 } 69 70 count++ 71 if count == 1 { 72 return false, 5, nil 73 } else if count > 2 { 74 return 
false, 20, fmt.Errorf("called too many times") 75 } 76 77 return true, 11, nil 78 } 79 80 state := testStateStore(t) 81 timeout := time.Now().Add(100 * time.Millisecond) 82 deadlineCtx, cancel := context.WithDeadline(context.Background(), timeout) 83 defer cancel() 84 85 time.AfterFunc(5*time.Millisecond, func() { 86 state.UpsertNode(11, node) 87 }) 88 89 resp, idx, err := state.BlockingQuery(queryFn, 10, deadlineCtx) 90 if assert.Nil(t, err) { 91 assert.Equal(t, 2, count) 92 assert.EqualValues(t, 11, idx) 93 assert.True(t, resp.(bool)) 94 } 95 } 96 97 // COMPAT 0.11: Uses AllocUpdateRequest.Alloc 98 // This test checks that: 99 // 1) The job is denormalized 100 // 2) Allocations are created 101 func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing.T) { 102 t.Parallel() 103 104 state := testStateStore(t) 105 alloc := mock.Alloc() 106 job := alloc.Job 107 alloc.Job = nil 108 109 if err := state.UpsertJob(999, job); err != nil { 110 t.Fatalf("err: %v", err) 111 } 112 113 eval := mock.Eval() 114 eval.JobID = job.ID 115 116 // Create an eval 117 if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil { 118 t.Fatalf("err: %v", err) 119 } 120 121 // Create a plan result 122 res := structs.ApplyPlanResultsRequest{ 123 AllocUpdateRequest: structs.AllocUpdateRequest{ 124 Alloc: []*structs.Allocation{alloc}, 125 Job: job, 126 }, 127 EvalID: eval.ID, 128 } 129 assert := assert.New(t) 130 err := state.UpsertPlanResults(1000, &res) 131 assert.Nil(err) 132 133 ws := memdb.NewWatchSet() 134 out, err := state.AllocByID(ws, alloc.ID) 135 assert.Nil(err) 136 assert.Equal(alloc, out) 137 138 index, err := state.Index("allocs") 139 assert.Nil(err) 140 assert.EqualValues(1000, index) 141 142 if watchFired(ws) { 143 t.Fatalf("bad") 144 } 145 146 evalOut, err := state.EvalByID(ws, eval.ID) 147 assert.Nil(err) 148 assert.NotNil(evalOut) 149 assert.EqualValues(1000, evalOut.ModifyIndex) 150 } 151 152 // This test checks that: 153 // 1) The job is 
denormalized 154 // 2) Allocations are denormalized and updated with the diff 155 // That stopped allocs Job is unmodified 156 func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { 157 t.Parallel() 158 159 state := testStateStore(t) 160 alloc := mock.Alloc() 161 job := alloc.Job 162 alloc.Job = nil 163 164 stoppedAlloc := mock.Alloc() 165 stoppedAlloc.Job = job 166 stoppedAllocDiff := &structs.AllocationDiff{ 167 ID: stoppedAlloc.ID, 168 DesiredDescription: "desired desc", 169 ClientStatus: structs.AllocClientStatusLost, 170 } 171 preemptedAlloc := mock.Alloc() 172 preemptedAlloc.Job = job 173 preemptedAllocDiff := &structs.AllocationDiff{ 174 ID: preemptedAlloc.ID, 175 PreemptedByAllocation: alloc.ID, 176 } 177 178 require := require.New(t) 179 require.NoError(state.UpsertAllocs(900, []*structs.Allocation{stoppedAlloc, preemptedAlloc})) 180 require.NoError(state.UpsertJob(999, job)) 181 182 // modify job and ensure that stopped and preempted alloc point to original Job 183 mJob := job.Copy() 184 mJob.TaskGroups[0].Name = "other" 185 186 require.NoError(state.UpsertJob(1001, mJob)) 187 188 eval := mock.Eval() 189 eval.JobID = job.ID 190 191 // Create an eval 192 require.NoError(state.UpsertEvals(1, []*structs.Evaluation{eval})) 193 194 // Create a plan result 195 res := structs.ApplyPlanResultsRequest{ 196 AllocUpdateRequest: structs.AllocUpdateRequest{ 197 AllocsUpdated: []*structs.Allocation{alloc}, 198 AllocsStopped: []*structs.AllocationDiff{stoppedAllocDiff}, 199 Job: mJob, 200 }, 201 EvalID: eval.ID, 202 AllocsPreempted: []*structs.AllocationDiff{preemptedAllocDiff}, 203 } 204 assert := assert.New(t) 205 planModifyIndex := uint64(1000) 206 err := state.UpsertPlanResults(planModifyIndex, &res) 207 require.NoError(err) 208 209 ws := memdb.NewWatchSet() 210 out, err := state.AllocByID(ws, alloc.ID) 211 require.NoError(err) 212 assert.Equal(alloc, out) 213 214 outJob, err := state.JobByID(ws, job.Namespace, job.ID) 215 require.NoError(err) 
216 require.Equal(mJob.TaskGroups, outJob.TaskGroups) 217 require.NotEmpty(job.TaskGroups, outJob.TaskGroups) 218 219 updatedStoppedAlloc, err := state.AllocByID(ws, stoppedAlloc.ID) 220 require.NoError(err) 221 assert.Equal(stoppedAllocDiff.DesiredDescription, updatedStoppedAlloc.DesiredDescription) 222 assert.Equal(structs.AllocDesiredStatusStop, updatedStoppedAlloc.DesiredStatus) 223 assert.Equal(stoppedAllocDiff.ClientStatus, updatedStoppedAlloc.ClientStatus) 224 assert.Equal(planModifyIndex, updatedStoppedAlloc.AllocModifyIndex) 225 assert.Equal(planModifyIndex, updatedStoppedAlloc.AllocModifyIndex) 226 assert.Equal(job.TaskGroups, updatedStoppedAlloc.Job.TaskGroups) 227 228 updatedPreemptedAlloc, err := state.AllocByID(ws, preemptedAlloc.ID) 229 require.NoError(err) 230 assert.Equal(structs.AllocDesiredStatusEvict, updatedPreemptedAlloc.DesiredStatus) 231 assert.Equal(preemptedAllocDiff.PreemptedByAllocation, updatedPreemptedAlloc.PreemptedByAllocation) 232 assert.Equal(planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex) 233 assert.Equal(planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex) 234 assert.Equal(job.TaskGroups, updatedPreemptedAlloc.Job.TaskGroups) 235 236 index, err := state.Index("allocs") 237 require.NoError(err) 238 assert.EqualValues(planModifyIndex, index) 239 240 require.False(watchFired(ws)) 241 242 evalOut, err := state.EvalByID(ws, eval.ID) 243 require.NoError(err) 244 require.NotNil(evalOut) 245 assert.EqualValues(planModifyIndex, evalOut.ModifyIndex) 246 247 } 248 249 // This test checks that the deployment is created and allocations count towards 250 // the deployment 251 func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { 252 t.Parallel() 253 254 state := testStateStore(t) 255 alloc := mock.Alloc() 256 alloc2 := mock.Alloc() 257 job := alloc.Job 258 alloc.Job = nil 259 alloc2.Job = nil 260 261 d := mock.Deployment() 262 alloc.DeploymentID = d.ID 263 alloc2.DeploymentID = d.ID 264 265 if err := 
state.UpsertJob(999, job); err != nil { 266 t.Fatalf("err: %v", err) 267 } 268 269 eval := mock.Eval() 270 eval.JobID = job.ID 271 272 // Create an eval 273 if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil { 274 t.Fatalf("err: %v", err) 275 } 276 277 // Create a plan result 278 res := structs.ApplyPlanResultsRequest{ 279 AllocUpdateRequest: structs.AllocUpdateRequest{ 280 Alloc: []*structs.Allocation{alloc, alloc2}, 281 Job: job, 282 }, 283 Deployment: d, 284 EvalID: eval.ID, 285 } 286 287 err := state.UpsertPlanResults(1000, &res) 288 if err != nil { 289 t.Fatalf("err: %v", err) 290 } 291 292 ws := memdb.NewWatchSet() 293 assert := assert.New(t) 294 out, err := state.AllocByID(ws, alloc.ID) 295 assert.Nil(err) 296 assert.Equal(alloc, out) 297 298 dout, err := state.DeploymentByID(ws, d.ID) 299 assert.Nil(err) 300 assert.NotNil(dout) 301 302 tg, ok := dout.TaskGroups[alloc.TaskGroup] 303 assert.True(ok) 304 assert.NotNil(tg) 305 assert.Equal(2, tg.PlacedAllocs) 306 307 evalOut, err := state.EvalByID(ws, eval.ID) 308 assert.Nil(err) 309 assert.NotNil(evalOut) 310 assert.EqualValues(1000, evalOut.ModifyIndex) 311 312 if watchFired(ws) { 313 t.Fatalf("bad") 314 } 315 316 // Update the allocs to be part of a new deployment 317 d2 := d.Copy() 318 d2.ID = uuid.Generate() 319 320 allocNew := alloc.Copy() 321 allocNew.DeploymentID = d2.ID 322 allocNew2 := alloc2.Copy() 323 allocNew2.DeploymentID = d2.ID 324 325 // Create another plan 326 res = structs.ApplyPlanResultsRequest{ 327 AllocUpdateRequest: structs.AllocUpdateRequest{ 328 Alloc: []*structs.Allocation{allocNew, allocNew2}, 329 Job: job, 330 }, 331 Deployment: d2, 332 EvalID: eval.ID, 333 } 334 335 err = state.UpsertPlanResults(1001, &res) 336 if err != nil { 337 t.Fatalf("err: %v", err) 338 } 339 340 dout, err = state.DeploymentByID(ws, d2.ID) 341 assert.Nil(err) 342 assert.NotNil(dout) 343 344 tg, ok = dout.TaskGroups[alloc.TaskGroup] 345 assert.True(ok) 346 assert.NotNil(tg) 347 
assert.Equal(2, tg.PlacedAllocs) 348 349 evalOut, err = state.EvalByID(ws, eval.ID) 350 assert.Nil(err) 351 assert.NotNil(evalOut) 352 assert.EqualValues(1001, evalOut.ModifyIndex) 353 } 354 355 // This test checks that: 356 // 1) Preempted allocations in plan results are updated 357 // 2) Evals are inserted for preempted jobs 358 func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { 359 t.Parallel() 360 require := require.New(t) 361 362 state := testStateStore(t) 363 alloc := mock.Alloc() 364 job := alloc.Job 365 alloc.Job = nil 366 367 // Insert job 368 err := state.UpsertJob(999, job) 369 require.NoError(err) 370 371 // Create an eval 372 eval := mock.Eval() 373 eval.JobID = job.ID 374 err = state.UpsertEvals(1, []*structs.Evaluation{eval}) 375 require.NoError(err) 376 377 // Insert alloc that will be preempted in the plan 378 preemptedAlloc := mock.Alloc() 379 err = state.UpsertAllocs(2, []*structs.Allocation{preemptedAlloc}) 380 require.NoError(err) 381 382 minimalPreemptedAlloc := &structs.Allocation{ 383 ID: preemptedAlloc.ID, 384 PreemptedByAllocation: alloc.ID, 385 ModifyTime: time.Now().Unix(), 386 } 387 388 // Create eval for preempted job 389 eval2 := mock.Eval() 390 eval2.JobID = preemptedAlloc.JobID 391 392 // Create a plan result 393 res := structs.ApplyPlanResultsRequest{ 394 AllocUpdateRequest: structs.AllocUpdateRequest{ 395 Alloc: []*structs.Allocation{alloc}, 396 Job: job, 397 }, 398 EvalID: eval.ID, 399 NodePreemptions: []*structs.Allocation{minimalPreemptedAlloc}, 400 PreemptionEvals: []*structs.Evaluation{eval2}, 401 } 402 403 err = state.UpsertPlanResults(1000, &res) 404 require.NoError(err) 405 406 ws := memdb.NewWatchSet() 407 408 // Verify alloc and eval created by plan 409 out, err := state.AllocByID(ws, alloc.ID) 410 require.NoError(err) 411 require.Equal(alloc, out) 412 413 index, err := state.Index("allocs") 414 require.NoError(err) 415 require.EqualValues(1000, index) 416 417 evalOut, err := state.EvalByID(ws, 
eval.ID) 418 require.NoError(err) 419 require.NotNil(evalOut) 420 require.EqualValues(1000, evalOut.ModifyIndex) 421 422 // Verify preempted alloc 423 preempted, err := state.AllocByID(ws, preemptedAlloc.ID) 424 require.NoError(err) 425 require.Equal(preempted.DesiredStatus, structs.AllocDesiredStatusEvict) 426 require.Equal(preempted.DesiredDescription, fmt.Sprintf("Preempted by alloc ID %v", alloc.ID)) 427 require.Equal(preempted.Job.ID, preemptedAlloc.Job.ID) 428 require.Equal(preempted.Job, preemptedAlloc.Job) 429 430 // Verify eval for preempted job 431 preemptedJobEval, err := state.EvalByID(ws, eval2.ID) 432 require.NoError(err) 433 require.NotNil(preemptedJobEval) 434 require.EqualValues(1000, preemptedJobEval.ModifyIndex) 435 436 } 437 438 // This test checks that deployment updates are applied correctly 439 func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { 440 t.Parallel() 441 state := testStateStore(t) 442 443 // Create a job that applies to all 444 job := mock.Job() 445 if err := state.UpsertJob(998, job); err != nil { 446 t.Fatalf("err: %v", err) 447 } 448 449 // Create a deployment that we will update its status 450 doutstanding := mock.Deployment() 451 doutstanding.JobID = job.ID 452 453 if err := state.UpsertDeployment(1000, doutstanding); err != nil { 454 t.Fatalf("err: %v", err) 455 } 456 457 eval := mock.Eval() 458 eval.JobID = job.ID 459 460 // Create an eval 461 if err := state.UpsertEvals(1, []*structs.Evaluation{eval}); err != nil { 462 t.Fatalf("err: %v", err) 463 } 464 alloc := mock.Alloc() 465 alloc.Job = nil 466 467 dnew := mock.Deployment() 468 dnew.JobID = job.ID 469 alloc.DeploymentID = dnew.ID 470 471 // Update the old deployment 472 update := &structs.DeploymentStatusUpdate{ 473 DeploymentID: doutstanding.ID, 474 Status: "foo", 475 StatusDescription: "bar", 476 } 477 478 // Create a plan result 479 res := structs.ApplyPlanResultsRequest{ 480 AllocUpdateRequest: structs.AllocUpdateRequest{ 481 Alloc: 
[]*structs.Allocation{alloc}, 482 Job: job, 483 }, 484 Deployment: dnew, 485 DeploymentUpdates: []*structs.DeploymentStatusUpdate{update}, 486 EvalID: eval.ID, 487 } 488 489 err := state.UpsertPlanResults(1000, &res) 490 if err != nil { 491 t.Fatalf("err: %v", err) 492 } 493 assert := assert.New(t) 494 ws := memdb.NewWatchSet() 495 496 // Check the deployments are correctly updated. 497 dout, err := state.DeploymentByID(ws, dnew.ID) 498 assert.Nil(err) 499 assert.NotNil(dout) 500 501 tg, ok := dout.TaskGroups[alloc.TaskGroup] 502 assert.True(ok) 503 assert.NotNil(tg) 504 assert.Equal(1, tg.PlacedAllocs) 505 506 doutstandingout, err := state.DeploymentByID(ws, doutstanding.ID) 507 assert.Nil(err) 508 assert.NotNil(doutstandingout) 509 assert.Equal(update.Status, doutstandingout.Status) 510 assert.Equal(update.StatusDescription, doutstandingout.StatusDescription) 511 assert.EqualValues(1000, doutstandingout.ModifyIndex) 512 513 evalOut, err := state.EvalByID(ws, eval.ID) 514 assert.Nil(err) 515 assert.NotNil(evalOut) 516 assert.EqualValues(1000, evalOut.ModifyIndex) 517 if watchFired(ws) { 518 t.Fatalf("bad") 519 } 520 } 521 522 func TestStateStore_UpsertDeployment(t *testing.T) { 523 t.Parallel() 524 525 state := testStateStore(t) 526 deployment := mock.Deployment() 527 528 // Create a watchset so we can test that upsert fires the watch 529 ws := memdb.NewWatchSet() 530 _, err := state.DeploymentsByJobID(ws, deployment.Namespace, deployment.ID, true) 531 if err != nil { 532 t.Fatalf("bad: %v", err) 533 } 534 535 err = state.UpsertDeployment(1000, deployment) 536 if err != nil { 537 t.Fatalf("err: %v", err) 538 } 539 if !watchFired(ws) { 540 t.Fatalf("bad") 541 } 542 543 ws = memdb.NewWatchSet() 544 out, err := state.DeploymentByID(ws, deployment.ID) 545 if err != nil { 546 t.Fatalf("err: %v", err) 547 } 548 549 if !reflect.DeepEqual(deployment, out) { 550 t.Fatalf("bad: %#v %#v", deployment, out) 551 } 552 553 index, err := state.Index("deployment") 554 if err != 
nil { 555 t.Fatalf("err: %v", err) 556 } 557 if index != 1000 { 558 t.Fatalf("bad: %d", index) 559 } 560 561 if watchFired(ws) { 562 t.Fatalf("bad") 563 } 564 } 565 566 // Tests that deployments of older create index and same job id are not returned 567 func TestStateStore_OldDeployment(t *testing.T) { 568 t.Parallel() 569 570 state := testStateStore(t) 571 job := mock.Job() 572 job.ID = "job1" 573 state.UpsertJob(1000, job) 574 575 deploy1 := mock.Deployment() 576 deploy1.JobID = job.ID 577 deploy1.JobCreateIndex = job.CreateIndex 578 579 deploy2 := mock.Deployment() 580 deploy2.JobID = job.ID 581 deploy2.JobCreateIndex = 11 582 583 require := require.New(t) 584 585 // Insert both deployments 586 err := state.UpsertDeployment(1001, deploy1) 587 require.Nil(err) 588 589 err = state.UpsertDeployment(1002, deploy2) 590 require.Nil(err) 591 592 ws := memdb.NewWatchSet() 593 // Should return both deployments 594 deploys, err := state.DeploymentsByJobID(ws, deploy1.Namespace, job.ID, true) 595 require.Nil(err) 596 require.Len(deploys, 2) 597 598 // Should only return deploy1 599 deploys, err = state.DeploymentsByJobID(ws, deploy1.Namespace, job.ID, false) 600 require.Nil(err) 601 require.Len(deploys, 1) 602 require.Equal(deploy1.ID, deploys[0].ID) 603 } 604 605 func TestStateStore_DeleteDeployment(t *testing.T) { 606 t.Parallel() 607 608 state := testStateStore(t) 609 d1 := mock.Deployment() 610 d2 := mock.Deployment() 611 612 err := state.UpsertDeployment(1000, d1) 613 if err != nil { 614 t.Fatalf("err: %v", err) 615 } 616 if err := state.UpsertDeployment(1001, d2); err != nil { 617 t.Fatalf("err: %v", err) 618 } 619 620 // Create a watchset so we can test that delete fires the watch 621 ws := memdb.NewWatchSet() 622 if _, err := state.DeploymentByID(ws, d1.ID); err != nil { 623 t.Fatalf("bad: %v", err) 624 } 625 626 err = state.DeleteDeployment(1002, []string{d1.ID, d2.ID}) 627 if err != nil { 628 t.Fatalf("err: %v", err) 629 } 630 631 if !watchFired(ws) { 632 
t.Fatalf("bad") 633 } 634 635 ws = memdb.NewWatchSet() 636 out, err := state.DeploymentByID(ws, d1.ID) 637 if err != nil { 638 t.Fatalf("err: %v", err) 639 } 640 641 if out != nil { 642 t.Fatalf("bad: %#v %#v", d1, out) 643 } 644 645 index, err := state.Index("deployment") 646 if err != nil { 647 t.Fatalf("err: %v", err) 648 } 649 if index != 1002 { 650 t.Fatalf("bad: %d", index) 651 } 652 653 if watchFired(ws) { 654 t.Fatalf("bad") 655 } 656 } 657 658 func TestStateStore_Deployments(t *testing.T) { 659 t.Parallel() 660 661 state := testStateStore(t) 662 var deployments []*structs.Deployment 663 664 for i := 0; i < 10; i++ { 665 deployment := mock.Deployment() 666 deployments = append(deployments, deployment) 667 668 err := state.UpsertDeployment(1000+uint64(i), deployment) 669 if err != nil { 670 t.Fatalf("err: %v", err) 671 } 672 } 673 674 ws := memdb.NewWatchSet() 675 iter, err := state.Deployments(ws) 676 if err != nil { 677 t.Fatalf("err: %v", err) 678 } 679 680 var out []*structs.Deployment 681 for { 682 raw := iter.Next() 683 if raw == nil { 684 break 685 } 686 out = append(out, raw.(*structs.Deployment)) 687 } 688 689 lessThan := func(i, j int) bool { 690 return deployments[i].ID < deployments[j].ID 691 } 692 sort.Slice(deployments, lessThan) 693 sort.Slice(out, lessThan) 694 695 if !reflect.DeepEqual(deployments, out) { 696 t.Fatalf("bad: %#v %#v", deployments, out) 697 } 698 699 if watchFired(ws) { 700 t.Fatalf("bad") 701 } 702 } 703 704 func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { 705 t.Parallel() 706 707 state := testStateStore(t) 708 deploy := mock.Deployment() 709 710 deploy.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" 711 err := state.UpsertDeployment(1000, deploy) 712 if err != nil { 713 t.Fatalf("err: %v", err) 714 } 715 716 // Create a watchset so we can test that getters don't cause it to fire 717 ws := memdb.NewWatchSet() 718 iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, deploy.ID) 719 if err != nil { 720 
t.Fatalf("err: %v", err) 721 } 722 723 gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { 724 var deploys []*structs.Deployment 725 for { 726 raw := iter.Next() 727 if raw == nil { 728 break 729 } 730 deploy := raw.(*structs.Deployment) 731 deploys = append(deploys, deploy) 732 } 733 return deploys 734 } 735 736 deploys := gatherDeploys(iter) 737 if len(deploys) != 1 { 738 t.Fatalf("err: %v", err) 739 } 740 741 if watchFired(ws) { 742 t.Fatalf("bad") 743 } 744 745 iter, err = state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11") 746 if err != nil { 747 t.Fatalf("err: %v", err) 748 } 749 750 deploys = gatherDeploys(iter) 751 if len(deploys) != 1 { 752 t.Fatalf("err: %v", err) 753 } 754 755 deploy = mock.Deployment() 756 deploy.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" 757 err = state.UpsertDeployment(1001, deploy) 758 if err != nil { 759 t.Fatalf("err: %v", err) 760 } 761 762 if !watchFired(ws) { 763 t.Fatalf("bad") 764 } 765 766 ws = memdb.NewWatchSet() 767 iter, err = state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11") 768 if err != nil { 769 t.Fatalf("err: %v", err) 770 } 771 772 deploys = gatherDeploys(iter) 773 if len(deploys) != 2 { 774 t.Fatalf("err: %v", err) 775 } 776 777 iter, err = state.DeploymentsByIDPrefix(ws, deploy.Namespace, "1111") 778 if err != nil { 779 t.Fatalf("err: %v", err) 780 } 781 782 deploys = gatherDeploys(iter) 783 if len(deploys) != 1 { 784 t.Fatalf("err: %v", err) 785 } 786 787 if watchFired(ws) { 788 t.Fatalf("bad") 789 } 790 } 791 792 func TestStateStore_UpsertNode_Node(t *testing.T) { 793 t.Parallel() 794 795 require := require.New(t) 796 state := testStateStore(t) 797 node := mock.Node() 798 799 // Create a watchset so we can test that upsert fires the watch 800 ws := memdb.NewWatchSet() 801 _, err := state.NodeByID(ws, node.ID) 802 require.NoError(err) 803 804 require.NoError(state.UpsertNode(1000, node)) 805 require.True(watchFired(ws)) 806 807 ws = memdb.NewWatchSet() 808 out, err := 
state.NodeByID(ws, node.ID) 809 require.NoError(err) 810 811 out2, err := state.NodeBySecretID(ws, node.SecretID) 812 require.NoError(err) 813 require.EqualValues(node, out) 814 require.EqualValues(node, out2) 815 require.Len(out.Events, 1) 816 require.Equal(NodeRegisterEventRegistered, out.Events[0].Message) 817 818 index, err := state.Index("nodes") 819 require.NoError(err) 820 require.EqualValues(1000, index) 821 require.False(watchFired(ws)) 822 823 // Transition the node to down and then up and ensure we get a re-register 824 // event 825 down := out.Copy() 826 down.Status = structs.NodeStatusDown 827 require.NoError(state.UpsertNode(1001, down)) 828 require.NoError(state.UpsertNode(1002, out)) 829 830 out, err = state.NodeByID(ws, node.ID) 831 require.NoError(err) 832 require.Len(out.Events, 2) 833 require.Equal(NodeRegisterEventReregistered, out.Events[1].Message) 834 } 835 836 func TestStateStore_DeleteNode_Node(t *testing.T) { 837 t.Parallel() 838 839 state := testStateStore(t) 840 841 // Create and insert two nodes, which we'll delete 842 node0 := mock.Node() 843 node1 := mock.Node() 844 err := state.UpsertNode(1000, node0) 845 require.NoError(t, err) 846 err = state.UpsertNode(1001, node1) 847 require.NoError(t, err) 848 849 // Create a watchset so we can test that delete fires the watch 850 ws := memdb.NewWatchSet() 851 852 // Check that both nodes are not nil 853 out, err := state.NodeByID(ws, node0.ID) 854 require.NoError(t, err) 855 require.NotNil(t, out) 856 out, err = state.NodeByID(ws, node1.ID) 857 require.NoError(t, err) 858 require.NotNil(t, out) 859 860 // Delete both nodes in a batch, fires the watch 861 err = state.DeleteNode(1002, []string{node0.ID, node1.ID}) 862 require.NoError(t, err) 863 require.True(t, watchFired(ws)) 864 865 // Check that both nodes are nil 866 ws = memdb.NewWatchSet() 867 out, err = state.NodeByID(ws, node0.ID) 868 require.NoError(t, err) 869 require.Nil(t, out) 870 out, err = state.NodeByID(ws, node1.ID) 871 
require.NoError(t, err) 872 require.Nil(t, out) 873 874 // Ensure that the index is still at 1002, from DeleteNode 875 index, err := state.Index("nodes") 876 require.NoError(t, err) 877 require.Equal(t, uint64(1002), index) 878 require.False(t, watchFired(ws)) 879 } 880 881 func TestStateStore_UpdateNodeStatus_Node(t *testing.T) { 882 t.Parallel() 883 require := require.New(t) 884 885 state := testStateStore(t) 886 node := mock.Node() 887 888 require.NoError(state.UpsertNode(800, node)) 889 890 // Create a watchset so we can test that update node status fires the watch 891 ws := memdb.NewWatchSet() 892 _, err := state.NodeByID(ws, node.ID) 893 require.NoError(err) 894 895 event := &structs.NodeEvent{ 896 Message: "Node ready foo", 897 Subsystem: structs.NodeEventSubsystemCluster, 898 Timestamp: time.Now(), 899 } 900 901 require.NoError(state.UpdateNodeStatus(801, node.ID, structs.NodeStatusReady, 70, event)) 902 require.True(watchFired(ws)) 903 904 ws = memdb.NewWatchSet() 905 out, err := state.NodeByID(ws, node.ID) 906 require.NoError(err) 907 require.Equal(structs.NodeStatusReady, out.Status) 908 require.EqualValues(801, out.ModifyIndex) 909 require.EqualValues(70, out.StatusUpdatedAt) 910 require.Len(out.Events, 2) 911 require.Equal(event.Message, out.Events[1].Message) 912 913 index, err := state.Index("nodes") 914 require.NoError(err) 915 require.EqualValues(801, index) 916 require.False(watchFired(ws)) 917 } 918 919 func TestStateStore_BatchUpdateNodeDrain(t *testing.T) { 920 t.Parallel() 921 require := require.New(t) 922 923 state := testStateStore(t) 924 925 n1, n2 := mock.Node(), mock.Node() 926 require.Nil(state.UpsertNode(1000, n1)) 927 require.Nil(state.UpsertNode(1001, n2)) 928 929 // Create a watchset so we can test that update node drain fires the watch 930 ws := memdb.NewWatchSet() 931 _, err := state.NodeByID(ws, n1.ID) 932 require.Nil(err) 933 934 expectedDrain := &structs.DrainStrategy{ 935 DrainSpec: structs.DrainSpec{ 936 Deadline: -1 * 
time.Second, 937 }, 938 } 939 940 update := map[string]*structs.DrainUpdate{ 941 n1.ID: { 942 DrainStrategy: expectedDrain, 943 }, 944 n2.ID: { 945 DrainStrategy: expectedDrain, 946 }, 947 } 948 949 event := &structs.NodeEvent{ 950 Message: "Drain strategy enabled", 951 Subsystem: structs.NodeEventSubsystemDrain, 952 Timestamp: time.Now(), 953 } 954 events := map[string]*structs.NodeEvent{ 955 n1.ID: event, 956 n2.ID: event, 957 } 958 959 require.Nil(state.BatchUpdateNodeDrain(1002, 7, update, events)) 960 require.True(watchFired(ws)) 961 962 ws = memdb.NewWatchSet() 963 for _, id := range []string{n1.ID, n2.ID} { 964 out, err := state.NodeByID(ws, id) 965 require.Nil(err) 966 require.True(out.Drain) 967 require.NotNil(out.DrainStrategy) 968 require.Equal(out.DrainStrategy, expectedDrain) 969 require.Len(out.Events, 2) 970 require.EqualValues(1002, out.ModifyIndex) 971 require.EqualValues(7, out.StatusUpdatedAt) 972 } 973 974 index, err := state.Index("nodes") 975 require.Nil(err) 976 require.EqualValues(1002, index) 977 require.False(watchFired(ws)) 978 } 979 980 func TestStateStore_UpdateNodeDrain_Node(t *testing.T) { 981 t.Parallel() 982 require := require.New(t) 983 984 state := testStateStore(t) 985 node := mock.Node() 986 987 require.Nil(state.UpsertNode(1000, node)) 988 989 // Create a watchset so we can test that update node drain fires the watch 990 ws := memdb.NewWatchSet() 991 _, err := state.NodeByID(ws, node.ID) 992 require.Nil(err) 993 994 expectedDrain := &structs.DrainStrategy{ 995 DrainSpec: structs.DrainSpec{ 996 Deadline: -1 * time.Second, 997 }, 998 } 999 1000 event := &structs.NodeEvent{ 1001 Message: "Drain strategy enabled", 1002 Subsystem: structs.NodeEventSubsystemDrain, 1003 Timestamp: time.Now(), 1004 } 1005 require.Nil(state.UpdateNodeDrain(1001, node.ID, expectedDrain, false, 7, event)) 1006 require.True(watchFired(ws)) 1007 1008 ws = memdb.NewWatchSet() 1009 out, err := state.NodeByID(ws, node.ID) 1010 require.Nil(err) 1011 
require.True(out.Drain) 1012 require.NotNil(out.DrainStrategy) 1013 require.Equal(out.DrainStrategy, expectedDrain) 1014 require.Len(out.Events, 2) 1015 require.EqualValues(1001, out.ModifyIndex) 1016 require.EqualValues(7, out.StatusUpdatedAt) 1017 1018 index, err := state.Index("nodes") 1019 require.Nil(err) 1020 require.EqualValues(1001, index) 1021 require.False(watchFired(ws)) 1022 } 1023 1024 func TestStateStore_AddSingleNodeEvent(t *testing.T) { 1025 t.Parallel() 1026 require := require.New(t) 1027 1028 state := testStateStore(t) 1029 1030 node := mock.Node() 1031 1032 // We create a new node event every time we register a node 1033 err := state.UpsertNode(1000, node) 1034 require.Nil(err) 1035 1036 require.Equal(1, len(node.Events)) 1037 require.Equal(structs.NodeEventSubsystemCluster, node.Events[0].Subsystem) 1038 require.Equal(NodeRegisterEventRegistered, node.Events[0].Message) 1039 1040 // Create a watchset so we can test that AddNodeEvent fires the watch 1041 ws := memdb.NewWatchSet() 1042 _, err = state.NodeByID(ws, node.ID) 1043 require.Nil(err) 1044 1045 nodeEvent := &structs.NodeEvent{ 1046 Message: "failed", 1047 Subsystem: "Driver", 1048 Timestamp: time.Now(), 1049 } 1050 nodeEvents := map[string][]*structs.NodeEvent{ 1051 node.ID: {nodeEvent}, 1052 } 1053 err = state.UpsertNodeEvents(uint64(1001), nodeEvents) 1054 require.Nil(err) 1055 1056 require.True(watchFired(ws)) 1057 1058 ws = memdb.NewWatchSet() 1059 out, err := state.NodeByID(ws, node.ID) 1060 require.Nil(err) 1061 1062 require.Equal(2, len(out.Events)) 1063 require.Equal(nodeEvent, out.Events[1]) 1064 } 1065 1066 // To prevent stale node events from accumulating, we limit the number of 1067 // stored node events to 10. 
1068 func TestStateStore_NodeEvents_RetentionWindow(t *testing.T) { 1069 t.Parallel() 1070 require := require.New(t) 1071 1072 state := testStateStore(t) 1073 1074 node := mock.Node() 1075 1076 err := state.UpsertNode(1000, node) 1077 if err != nil { 1078 t.Fatalf("err: %v", err) 1079 } 1080 require.Equal(1, len(node.Events)) 1081 require.Equal(structs.NodeEventSubsystemCluster, node.Events[0].Subsystem) 1082 require.Equal(NodeRegisterEventRegistered, node.Events[0].Message) 1083 1084 var out *structs.Node 1085 for i := 1; i <= 20; i++ { 1086 ws := memdb.NewWatchSet() 1087 out, err = state.NodeByID(ws, node.ID) 1088 require.Nil(err) 1089 1090 nodeEvent := &structs.NodeEvent{ 1091 Message: fmt.Sprintf("%dith failed", i), 1092 Subsystem: "Driver", 1093 Timestamp: time.Now(), 1094 } 1095 1096 nodeEvents := map[string][]*structs.NodeEvent{ 1097 out.ID: {nodeEvent}, 1098 } 1099 err := state.UpsertNodeEvents(uint64(i), nodeEvents) 1100 require.Nil(err) 1101 1102 require.True(watchFired(ws)) 1103 ws = memdb.NewWatchSet() 1104 out, err = state.NodeByID(ws, node.ID) 1105 require.Nil(err) 1106 } 1107 1108 ws := memdb.NewWatchSet() 1109 out, err = state.NodeByID(ws, node.ID) 1110 require.Nil(err) 1111 1112 require.Equal(10, len(out.Events)) 1113 require.Equal(uint64(11), out.Events[0].CreateIndex) 1114 require.Equal(uint64(20), out.Events[len(out.Events)-1].CreateIndex) 1115 } 1116 1117 func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { 1118 t.Parallel() 1119 require := require.New(t) 1120 1121 state := testStateStore(t) 1122 node := mock.Node() 1123 require.Nil(state.UpsertNode(1000, node)) 1124 1125 // Create a watchset so we can test that update node drain fires the watch 1126 ws := memdb.NewWatchSet() 1127 _, err := state.NodeByID(ws, node.ID) 1128 require.Nil(err) 1129 1130 drain := &structs.DrainStrategy{ 1131 DrainSpec: structs.DrainSpec{ 1132 Deadline: -1 * time.Second, 1133 }, 1134 } 1135 1136 event1 := &structs.NodeEvent{ 1137 Message: "Drain 
strategy enabled", 1138 Subsystem: structs.NodeEventSubsystemDrain, 1139 Timestamp: time.Now(), 1140 } 1141 require.Nil(state.UpdateNodeDrain(1001, node.ID, drain, false, 7, event1)) 1142 require.True(watchFired(ws)) 1143 1144 // Remove the drain 1145 event2 := &structs.NodeEvent{ 1146 Message: "Drain strategy disabled", 1147 Subsystem: structs.NodeEventSubsystemDrain, 1148 Timestamp: time.Now(), 1149 } 1150 require.Nil(state.UpdateNodeDrain(1002, node.ID, nil, true, 9, event2)) 1151 1152 ws = memdb.NewWatchSet() 1153 out, err := state.NodeByID(ws, node.ID) 1154 require.Nil(err) 1155 require.False(out.Drain) 1156 require.Nil(out.DrainStrategy) 1157 require.Equal(out.SchedulingEligibility, structs.NodeSchedulingEligible) 1158 require.Len(out.Events, 3) 1159 require.EqualValues(1002, out.ModifyIndex) 1160 require.EqualValues(9, out.StatusUpdatedAt) 1161 1162 index, err := state.Index("nodes") 1163 require.Nil(err) 1164 require.EqualValues(1002, index) 1165 require.False(watchFired(ws)) 1166 } 1167 1168 func TestStateStore_UpdateNodeEligibility(t *testing.T) { 1169 t.Parallel() 1170 require := require.New(t) 1171 1172 state := testStateStore(t) 1173 node := mock.Node() 1174 1175 err := state.UpsertNode(1000, node) 1176 if err != nil { 1177 t.Fatalf("err: %v", err) 1178 } 1179 1180 expectedEligibility := structs.NodeSchedulingIneligible 1181 1182 // Create a watchset so we can test that update node drain fires the watch 1183 ws := memdb.NewWatchSet() 1184 if _, err := state.NodeByID(ws, node.ID); err != nil { 1185 t.Fatalf("bad: %v", err) 1186 } 1187 1188 event := &structs.NodeEvent{ 1189 Message: "Node marked as ineligible", 1190 Subsystem: structs.NodeEventSubsystemCluster, 1191 Timestamp: time.Now(), 1192 } 1193 require.Nil(state.UpdateNodeEligibility(1001, node.ID, expectedEligibility, 7, event)) 1194 require.True(watchFired(ws)) 1195 1196 ws = memdb.NewWatchSet() 1197 out, err := state.NodeByID(ws, node.ID) 1198 require.Nil(err) 1199 
require.Equal(out.SchedulingEligibility, expectedEligibility) 1200 require.Len(out.Events, 2) 1201 require.Equal(out.Events[1], event) 1202 require.EqualValues(1001, out.ModifyIndex) 1203 require.EqualValues(7, out.StatusUpdatedAt) 1204 1205 index, err := state.Index("nodes") 1206 require.Nil(err) 1207 require.EqualValues(1001, index) 1208 require.False(watchFired(ws)) 1209 1210 // Set a drain strategy 1211 expectedDrain := &structs.DrainStrategy{ 1212 DrainSpec: structs.DrainSpec{ 1213 Deadline: -1 * time.Second, 1214 }, 1215 } 1216 require.Nil(state.UpdateNodeDrain(1002, node.ID, expectedDrain, false, 7, nil)) 1217 1218 // Try to set the node to eligible 1219 err = state.UpdateNodeEligibility(1003, node.ID, structs.NodeSchedulingEligible, 9, nil) 1220 require.NotNil(err) 1221 require.Contains(err.Error(), "while it is draining") 1222 } 1223 1224 func TestStateStore_Nodes(t *testing.T) { 1225 t.Parallel() 1226 1227 state := testStateStore(t) 1228 var nodes []*structs.Node 1229 1230 for i := 0; i < 10; i++ { 1231 node := mock.Node() 1232 nodes = append(nodes, node) 1233 1234 err := state.UpsertNode(1000+uint64(i), node) 1235 if err != nil { 1236 t.Fatalf("err: %v", err) 1237 } 1238 } 1239 1240 // Create a watchset so we can test that getters don't cause it to fire 1241 ws := memdb.NewWatchSet() 1242 iter, err := state.Nodes(ws) 1243 if err != nil { 1244 t.Fatalf("bad: %v", err) 1245 } 1246 1247 var out []*structs.Node 1248 for { 1249 raw := iter.Next() 1250 if raw == nil { 1251 break 1252 } 1253 out = append(out, raw.(*structs.Node)) 1254 } 1255 1256 sort.Sort(NodeIDSort(nodes)) 1257 sort.Sort(NodeIDSort(out)) 1258 1259 if !reflect.DeepEqual(nodes, out) { 1260 t.Fatalf("bad: %#v %#v", nodes, out) 1261 } 1262 1263 if watchFired(ws) { 1264 t.Fatalf("bad") 1265 } 1266 } 1267 1268 func TestStateStore_NodesByIDPrefix(t *testing.T) { 1269 t.Parallel() 1270 1271 state := testStateStore(t) 1272 node := mock.Node() 1273 1274 node.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" 
1275 err := state.UpsertNode(1000, node) 1276 if err != nil { 1277 t.Fatalf("err: %v", err) 1278 } 1279 1280 // Create a watchset so we can test that getters don't cause it to fire 1281 ws := memdb.NewWatchSet() 1282 iter, err := state.NodesByIDPrefix(ws, node.ID) 1283 if err != nil { 1284 t.Fatalf("err: %v", err) 1285 } 1286 1287 gatherNodes := func(iter memdb.ResultIterator) []*structs.Node { 1288 var nodes []*structs.Node 1289 for { 1290 raw := iter.Next() 1291 if raw == nil { 1292 break 1293 } 1294 node := raw.(*structs.Node) 1295 nodes = append(nodes, node) 1296 } 1297 return nodes 1298 } 1299 1300 nodes := gatherNodes(iter) 1301 if len(nodes) != 1 { 1302 t.Fatalf("err: %v", err) 1303 } 1304 1305 if watchFired(ws) { 1306 t.Fatalf("bad") 1307 } 1308 1309 iter, err = state.NodesByIDPrefix(ws, "11") 1310 if err != nil { 1311 t.Fatalf("err: %v", err) 1312 } 1313 1314 nodes = gatherNodes(iter) 1315 if len(nodes) != 1 { 1316 t.Fatalf("err: %v", err) 1317 } 1318 1319 node = mock.Node() 1320 node.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" 1321 err = state.UpsertNode(1001, node) 1322 if err != nil { 1323 t.Fatalf("err: %v", err) 1324 } 1325 1326 if !watchFired(ws) { 1327 t.Fatalf("bad") 1328 } 1329 1330 ws = memdb.NewWatchSet() 1331 iter, err = state.NodesByIDPrefix(ws, "11") 1332 if err != nil { 1333 t.Fatalf("err: %v", err) 1334 } 1335 1336 nodes = gatherNodes(iter) 1337 if len(nodes) != 2 { 1338 t.Fatalf("err: %v", err) 1339 } 1340 1341 iter, err = state.NodesByIDPrefix(ws, "1111") 1342 if err != nil { 1343 t.Fatalf("err: %v", err) 1344 } 1345 1346 nodes = gatherNodes(iter) 1347 if len(nodes) != 1 { 1348 t.Fatalf("err: %v", err) 1349 } 1350 1351 if watchFired(ws) { 1352 t.Fatalf("bad") 1353 } 1354 } 1355 1356 func TestStateStore_RestoreNode(t *testing.T) { 1357 t.Parallel() 1358 1359 state := testStateStore(t) 1360 node := mock.Node() 1361 1362 restore, err := state.Restore() 1363 if err != nil { 1364 t.Fatalf("err: %v", err) 1365 } 1366 1367 err = 
restore.NodeRestore(node) 1368 if err != nil { 1369 t.Fatalf("err: %v", err) 1370 } 1371 restore.Commit() 1372 1373 ws := memdb.NewWatchSet() 1374 out, err := state.NodeByID(ws, node.ID) 1375 if err != nil { 1376 t.Fatalf("err: %v", err) 1377 } 1378 1379 if !reflect.DeepEqual(out, node) { 1380 t.Fatalf("Bad: %#v %#v", out, node) 1381 } 1382 } 1383 1384 func TestStateStore_UpsertJob_Job(t *testing.T) { 1385 t.Parallel() 1386 1387 state := testStateStore(t) 1388 job := mock.Job() 1389 1390 // Create a watchset so we can test that upsert fires the watch 1391 ws := memdb.NewWatchSet() 1392 _, err := state.JobByID(ws, job.Namespace, job.ID) 1393 if err != nil { 1394 t.Fatalf("bad: %v", err) 1395 } 1396 1397 if err := state.UpsertJob(1000, job); err != nil { 1398 t.Fatalf("err: %v", err) 1399 } 1400 if !watchFired(ws) { 1401 t.Fatalf("bad") 1402 } 1403 1404 ws = memdb.NewWatchSet() 1405 out, err := state.JobByID(ws, job.Namespace, job.ID) 1406 if err != nil { 1407 t.Fatalf("err: %v", err) 1408 } 1409 1410 if !reflect.DeepEqual(job, out) { 1411 t.Fatalf("bad: %#v %#v", job, out) 1412 } 1413 1414 index, err := state.Index("jobs") 1415 if err != nil { 1416 t.Fatalf("err: %v", err) 1417 } 1418 if index != 1000 { 1419 t.Fatalf("bad: %d", index) 1420 } 1421 1422 summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) 1423 if err != nil { 1424 t.Fatalf("err: %v", err) 1425 } 1426 if summary == nil { 1427 t.Fatalf("nil summary") 1428 } 1429 if summary.JobID != job.ID { 1430 t.Fatalf("bad summary id: %v", summary.JobID) 1431 } 1432 _, ok := summary.Summary["web"] 1433 if !ok { 1434 t.Fatalf("nil summary for task group") 1435 } 1436 if watchFired(ws) { 1437 t.Fatalf("bad") 1438 } 1439 1440 // Check the job versions 1441 allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 1442 if err != nil { 1443 t.Fatalf("err: %v", err) 1444 } 1445 if len(allVersions) != 1 { 1446 t.Fatalf("got %d; want 1", len(allVersions)) 1447 } 1448 1449 if a := allVersions[0]; a.ID != 
job.ID || a.Version != 0 { 1450 t.Fatalf("bad: %v", a) 1451 } 1452 1453 // Test the looking up the job by version returns the same results 1454 vout, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0) 1455 if err != nil { 1456 t.Fatalf("err: %v", err) 1457 } 1458 1459 if !reflect.DeepEqual(out, vout) { 1460 t.Fatalf("bad: %#v %#v", out, vout) 1461 } 1462 } 1463 1464 func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { 1465 t.Parallel() 1466 1467 state := testStateStore(t) 1468 job := mock.Job() 1469 1470 // Create a watchset so we can test that upsert fires the watch 1471 ws := memdb.NewWatchSet() 1472 _, err := state.JobByID(ws, job.Namespace, job.ID) 1473 if err != nil { 1474 t.Fatalf("bad: %v", err) 1475 } 1476 1477 if err := state.UpsertJob(1000, job); err != nil { 1478 t.Fatalf("err: %v", err) 1479 } 1480 1481 job2 := mock.Job() 1482 job2.ID = job.ID 1483 job2.AllAtOnce = true 1484 err = state.UpsertJob(1001, job2) 1485 if err != nil { 1486 t.Fatalf("err: %v", err) 1487 } 1488 1489 if !watchFired(ws) { 1490 t.Fatalf("bad") 1491 } 1492 1493 ws = memdb.NewWatchSet() 1494 out, err := state.JobByID(ws, job.Namespace, job.ID) 1495 if err != nil { 1496 t.Fatalf("err: %v", err) 1497 } 1498 1499 if !reflect.DeepEqual(job2, out) { 1500 t.Fatalf("bad: %#v %#v", job2, out) 1501 } 1502 1503 if out.CreateIndex != 1000 { 1504 t.Fatalf("bad: %#v", out) 1505 } 1506 if out.ModifyIndex != 1001 { 1507 t.Fatalf("bad: %#v", out) 1508 } 1509 if out.Version != 1 { 1510 t.Fatalf("bad: %#v", out) 1511 } 1512 1513 index, err := state.Index("jobs") 1514 if err != nil { 1515 t.Fatalf("err: %v", err) 1516 } 1517 if index != 1001 { 1518 t.Fatalf("bad: %d", index) 1519 } 1520 1521 // Test the looking up the job by version returns the same results 1522 vout, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 1) 1523 if err != nil { 1524 t.Fatalf("err: %v", err) 1525 } 1526 1527 if !reflect.DeepEqual(out, vout) { 1528 t.Fatalf("bad: %#v %#v", out, vout) 1529 } 1530 1531 // 
Test that the job summary remains the same if the job is updated but 1532 // count remains same 1533 summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) 1534 if err != nil { 1535 t.Fatalf("err: %v", err) 1536 } 1537 if summary == nil { 1538 t.Fatalf("nil summary") 1539 } 1540 if summary.JobID != job.ID { 1541 t.Fatalf("bad summary id: %v", summary.JobID) 1542 } 1543 _, ok := summary.Summary["web"] 1544 if !ok { 1545 t.Fatalf("nil summary for task group") 1546 } 1547 1548 // Check the job versions 1549 allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 1550 if err != nil { 1551 t.Fatalf("err: %v", err) 1552 } 1553 if len(allVersions) != 2 { 1554 t.Fatalf("got %d; want 1", len(allVersions)) 1555 } 1556 1557 if a := allVersions[0]; a.ID != job.ID || a.Version != 1 || !a.AllAtOnce { 1558 t.Fatalf("bad: %+v", a) 1559 } 1560 if a := allVersions[1]; a.ID != job.ID || a.Version != 0 || a.AllAtOnce { 1561 t.Fatalf("bad: %+v", a) 1562 } 1563 1564 if watchFired(ws) { 1565 t.Fatalf("bad") 1566 } 1567 } 1568 1569 func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { 1570 t.Parallel() 1571 1572 state := testStateStore(t) 1573 job := mock.PeriodicJob() 1574 1575 // Create a watchset so we can test that upsert fires the watch 1576 ws := memdb.NewWatchSet() 1577 _, err := state.JobByID(ws, job.Namespace, job.ID) 1578 if err != nil { 1579 t.Fatalf("bad: %v", err) 1580 } 1581 1582 if err := state.UpsertJob(1000, job); err != nil { 1583 t.Fatalf("err: %v", err) 1584 } 1585 1586 // Create a child and an evaluation 1587 job2 := job.Copy() 1588 job2.Periodic = nil 1589 job2.ID = fmt.Sprintf("%v/%s-1490635020", job.ID, structs.PeriodicLaunchSuffix) 1590 err = state.UpsertJob(1001, job2) 1591 if err != nil { 1592 t.Fatalf("err: %v", err) 1593 } 1594 1595 eval := mock.Eval() 1596 eval.JobID = job2.ID 1597 err = state.UpsertEvals(1002, []*structs.Evaluation{eval}) 1598 if err != nil { 1599 t.Fatalf("err: %v", err) 1600 } 1601 1602 job3 := job.Copy() 
1603 job3.TaskGroups[0].Tasks[0].Name = "new name" 1604 err = state.UpsertJob(1003, job3) 1605 if err != nil { 1606 t.Fatalf("err: %v", err) 1607 } 1608 1609 if !watchFired(ws) { 1610 t.Fatalf("bad") 1611 } 1612 1613 ws = memdb.NewWatchSet() 1614 out, err := state.JobByID(ws, job.Namespace, job.ID) 1615 if err != nil { 1616 t.Fatalf("err: %v", err) 1617 } 1618 1619 if s, e := out.Status, structs.JobStatusRunning; s != e { 1620 t.Fatalf("got status %v; want %v", s, e) 1621 } 1622 1623 } 1624 1625 func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { 1626 t.Parallel() 1627 1628 assert := assert.New(t) 1629 state := testStateStore(t) 1630 job := mock.Job() 1631 job.Namespace = "foo" 1632 1633 err := state.UpsertJob(1000, job) 1634 assert.Contains(err.Error(), "nonexistent namespace") 1635 1636 ws := memdb.NewWatchSet() 1637 out, err := state.JobByID(ws, job.Namespace, job.ID) 1638 assert.Nil(err) 1639 assert.Nil(out) 1640 } 1641 1642 // Upsert a job that is the child of a parent job and ensures its summary gets 1643 // updated. 
1644 func TestStateStore_UpsertJob_ChildJob(t *testing.T) { 1645 t.Parallel() 1646 1647 state := testStateStore(t) 1648 1649 // Create a watchset so we can test that upsert fires the watch 1650 parent := mock.Job() 1651 ws := memdb.NewWatchSet() 1652 _, err := state.JobByID(ws, parent.Namespace, parent.ID) 1653 if err != nil { 1654 t.Fatalf("bad: %v", err) 1655 } 1656 1657 if err := state.UpsertJob(1000, parent); err != nil { 1658 t.Fatalf("err: %v", err) 1659 } 1660 1661 child := mock.Job() 1662 child.ParentID = parent.ID 1663 if err := state.UpsertJob(1001, child); err != nil { 1664 t.Fatalf("err: %v", err) 1665 } 1666 1667 summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) 1668 if err != nil { 1669 t.Fatalf("err: %v", err) 1670 } 1671 if summary == nil { 1672 t.Fatalf("nil summary") 1673 } 1674 if summary.JobID != parent.ID { 1675 t.Fatalf("bad summary id: %v", parent.ID) 1676 } 1677 if summary.Children == nil { 1678 t.Fatalf("nil children summary") 1679 } 1680 if summary.Children.Pending != 1 || summary.Children.Running != 0 || summary.Children.Dead != 0 { 1681 t.Fatalf("bad children summary: %v", summary.Children) 1682 } 1683 if !watchFired(ws) { 1684 t.Fatalf("bad") 1685 } 1686 } 1687 1688 func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { 1689 t.Parallel() 1690 1691 state := testStateStore(t) 1692 1693 // Create a job and mark it as stable 1694 job := mock.Job() 1695 job.Stable = true 1696 job.Name = "0" 1697 1698 // Create a watchset so we can test that upsert fires the watch 1699 ws := memdb.NewWatchSet() 1700 _, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 1701 if err != nil { 1702 t.Fatalf("bad: %v", err) 1703 } 1704 1705 if err := state.UpsertJob(1000, job); err != nil { 1706 t.Fatalf("err: %v", err) 1707 } 1708 1709 if !watchFired(ws) { 1710 t.Fatalf("bad") 1711 } 1712 1713 var finalJob *structs.Job 1714 for i := 1; i < 300; i++ { 1715 finalJob = mock.Job() 1716 finalJob.ID = job.ID 1717 finalJob.Name = 
fmt.Sprintf("%d", i) 1718 err = state.UpsertJob(uint64(1000+i), finalJob) 1719 if err != nil { 1720 t.Fatalf("err: %v", err) 1721 } 1722 } 1723 1724 ws = memdb.NewWatchSet() 1725 out, err := state.JobByID(ws, job.Namespace, job.ID) 1726 if err != nil { 1727 t.Fatalf("err: %v", err) 1728 } 1729 1730 if !reflect.DeepEqual(finalJob, out) { 1731 t.Fatalf("bad: %#v %#v", finalJob, out) 1732 } 1733 1734 if out.CreateIndex != 1000 { 1735 t.Fatalf("bad: %#v", out) 1736 } 1737 if out.ModifyIndex != 1299 { 1738 t.Fatalf("bad: %#v", out) 1739 } 1740 if out.Version != 299 { 1741 t.Fatalf("bad: %#v", out) 1742 } 1743 1744 index, err := state.Index("job_version") 1745 if err != nil { 1746 t.Fatalf("err: %v", err) 1747 } 1748 if index != 1299 { 1749 t.Fatalf("bad: %d", index) 1750 } 1751 1752 // Check the job versions 1753 allVersions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 1754 if err != nil { 1755 t.Fatalf("err: %v", err) 1756 } 1757 if len(allVersions) != structs.JobTrackedVersions { 1758 t.Fatalf("got %d; want %d", len(allVersions), structs.JobTrackedVersions) 1759 } 1760 1761 if a := allVersions[0]; a.ID != job.ID || a.Version != 299 || a.Name != "299" { 1762 t.Fatalf("bad: %+v", a) 1763 } 1764 if a := allVersions[1]; a.ID != job.ID || a.Version != 298 || a.Name != "298" { 1765 t.Fatalf("bad: %+v", a) 1766 } 1767 1768 // Ensure we didn't delete the stable job 1769 if a := allVersions[structs.JobTrackedVersions-1]; a.ID != job.ID || 1770 a.Version != 0 || a.Name != "0" || !a.Stable { 1771 t.Fatalf("bad: %+v", a) 1772 } 1773 1774 if watchFired(ws) { 1775 t.Fatalf("bad") 1776 } 1777 } 1778 1779 func TestStateStore_DeleteJob_Job(t *testing.T) { 1780 t.Parallel() 1781 1782 state := testStateStore(t) 1783 job := mock.Job() 1784 1785 err := state.UpsertJob(1000, job) 1786 if err != nil { 1787 t.Fatalf("err: %v", err) 1788 } 1789 1790 // Create a watchset so we can test that delete fires the watch 1791 ws := memdb.NewWatchSet() 1792 if _, err := state.JobByID(ws, 
job.Namespace, job.ID); err != nil { 1793 t.Fatalf("bad: %v", err) 1794 } 1795 1796 err = state.DeleteJob(1001, job.Namespace, job.ID) 1797 if err != nil { 1798 t.Fatalf("err: %v", err) 1799 } 1800 1801 if !watchFired(ws) { 1802 t.Fatalf("bad") 1803 } 1804 1805 ws = memdb.NewWatchSet() 1806 out, err := state.JobByID(ws, job.Namespace, job.ID) 1807 if err != nil { 1808 t.Fatalf("err: %v", err) 1809 } 1810 1811 if out != nil { 1812 t.Fatalf("bad: %#v %#v", job, out) 1813 } 1814 1815 index, err := state.Index("jobs") 1816 if err != nil { 1817 t.Fatalf("err: %v", err) 1818 } 1819 if index != 1001 { 1820 t.Fatalf("bad: %d", index) 1821 } 1822 1823 summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) 1824 if err != nil { 1825 t.Fatalf("err: %v", err) 1826 } 1827 if summary != nil { 1828 t.Fatalf("expected summary to be nil, but got: %v", summary) 1829 } 1830 1831 index, err = state.Index("job_summary") 1832 if err != nil { 1833 t.Fatalf("err: %v", err) 1834 } 1835 if index != 1001 { 1836 t.Fatalf("bad: %d", index) 1837 } 1838 1839 versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 1840 if err != nil { 1841 t.Fatalf("err: %v", err) 1842 } 1843 if len(versions) != 0 { 1844 t.Fatalf("expected no job versions") 1845 } 1846 1847 index, err = state.Index("job_summary") 1848 if err != nil { 1849 t.Fatalf("err: %v", err) 1850 } 1851 if index != 1001 { 1852 t.Fatalf("bad: %d", index) 1853 } 1854 1855 if watchFired(ws) { 1856 t.Fatalf("bad") 1857 } 1858 } 1859 1860 func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { 1861 t.Parallel() 1862 1863 state := testStateStore(t) 1864 1865 const testJobCount = 10 1866 const jobVersionCount = 4 1867 1868 stateIndex := uint64(1000) 1869 1870 jobs := make([]*structs.Job, testJobCount) 1871 for i := 0; i < testJobCount; i++ { 1872 stateIndex++ 1873 job := mock.BatchJob() 1874 1875 err := state.UpsertJob(stateIndex, job) 1876 require.NoError(t, err) 1877 1878 jobs[i] = job 1879 1880 // Create some versions 
1881 for vi := 1; vi < jobVersionCount; vi++ { 1882 stateIndex++ 1883 1884 job := job.Copy() 1885 job.TaskGroups[0].Tasks[0].Env = map[string]string{ 1886 "Version": fmt.Sprintf("%d", vi), 1887 } 1888 1889 require.NoError(t, state.UpsertJob(stateIndex, job)) 1890 } 1891 } 1892 1893 ws := memdb.NewWatchSet() 1894 1895 // Sanity check that jobs are present in DB 1896 job, err := state.JobByID(ws, jobs[0].Namespace, jobs[0].ID) 1897 require.NoError(t, err) 1898 require.Equal(t, jobs[0].ID, job.ID) 1899 1900 jobVersions, err := state.JobVersionsByID(ws, jobs[0].Namespace, jobs[0].ID) 1901 require.NoError(t, err) 1902 require.Equal(t, jobVersionCount, len(jobVersions)) 1903 1904 // Actually delete 1905 const deletionIndex = uint64(10001) 1906 err = state.WithWriteTransaction(func(txn Txn) error { 1907 for i, job := range jobs { 1908 err := state.DeleteJobTxn(deletionIndex, job.Namespace, job.ID, txn) 1909 require.NoError(t, err, "failed at %d %e", i, err) 1910 } 1911 return nil 1912 }) 1913 assert.NoError(t, err) 1914 1915 assert.True(t, watchFired(ws)) 1916 1917 ws = memdb.NewWatchSet() 1918 out, err := state.JobByID(ws, jobs[0].Namespace, jobs[0].ID) 1919 require.NoError(t, err) 1920 require.Nil(t, out) 1921 1922 jobVersions, err = state.JobVersionsByID(ws, jobs[0].Namespace, jobs[0].ID) 1923 require.NoError(t, err) 1924 require.Empty(t, jobVersions) 1925 1926 index, err := state.Index("jobs") 1927 require.NoError(t, err) 1928 require.Equal(t, deletionIndex, index) 1929 } 1930 1931 func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { 1932 t.Parallel() 1933 1934 state := testStateStore(t) 1935 assert := assert.New(t) 1936 1937 // Create a job and mark it as stable 1938 job := mock.Job() 1939 job.Stable = true 1940 job.Priority = 0 1941 1942 // Create a watchset so we can test that upsert fires the watch 1943 ws := memdb.NewWatchSet() 1944 _, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 1945 assert.Nil(err) 1946 assert.Nil(state.UpsertJob(1000, 
job)) 1947 assert.True(watchFired(ws)) 1948 1949 var finalJob *structs.Job 1950 for i := 1; i < 20; i++ { 1951 finalJob = mock.Job() 1952 finalJob.ID = job.ID 1953 finalJob.Priority = i 1954 assert.Nil(state.UpsertJob(uint64(1000+i), finalJob)) 1955 } 1956 1957 assert.Nil(state.DeleteJob(1020, job.Namespace, job.ID)) 1958 assert.True(watchFired(ws)) 1959 1960 ws = memdb.NewWatchSet() 1961 out, err := state.JobByID(ws, job.Namespace, job.ID) 1962 assert.Nil(err) 1963 assert.Nil(out) 1964 1965 index, err := state.Index("jobs") 1966 assert.Nil(err) 1967 assert.EqualValues(1020, index) 1968 1969 summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) 1970 assert.Nil(err) 1971 assert.Nil(summary) 1972 1973 index, err = state.Index("job_version") 1974 assert.Nil(err) 1975 assert.EqualValues(1020, index) 1976 1977 versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID) 1978 assert.Nil(err) 1979 assert.Len(versions, 0) 1980 1981 index, err = state.Index("job_summary") 1982 assert.Nil(err) 1983 assert.EqualValues(1020, index) 1984 1985 assert.False(watchFired(ws)) 1986 } 1987 1988 func TestStateStore_DeleteJob_ChildJob(t *testing.T) { 1989 t.Parallel() 1990 1991 state := testStateStore(t) 1992 1993 parent := mock.Job() 1994 if err := state.UpsertJob(998, parent); err != nil { 1995 t.Fatalf("err: %v", err) 1996 } 1997 1998 child := mock.Job() 1999 child.ParentID = parent.ID 2000 2001 if err := state.UpsertJob(999, child); err != nil { 2002 t.Fatalf("err: %v", err) 2003 } 2004 2005 // Create a watchset so we can test that delete fires the watch 2006 ws := memdb.NewWatchSet() 2007 if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { 2008 t.Fatalf("bad: %v", err) 2009 } 2010 2011 err := state.DeleteJob(1001, child.Namespace, child.ID) 2012 if err != nil { 2013 t.Fatalf("err: %v", err) 2014 } 2015 if !watchFired(ws) { 2016 t.Fatalf("bad") 2017 } 2018 2019 ws = memdb.NewWatchSet() 2020 summary, err := state.JobSummaryByID(ws, 
parent.Namespace, parent.ID) 2021 if err != nil { 2022 t.Fatalf("err: %v", err) 2023 } 2024 if summary == nil { 2025 t.Fatalf("nil summary") 2026 } 2027 if summary.JobID != parent.ID { 2028 t.Fatalf("bad summary id: %v", parent.ID) 2029 } 2030 if summary.Children == nil { 2031 t.Fatalf("nil children summary") 2032 } 2033 if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { 2034 t.Fatalf("bad children summary: %v", summary.Children) 2035 } 2036 if watchFired(ws) { 2037 t.Fatalf("bad") 2038 } 2039 } 2040 2041 func TestStateStore_Jobs(t *testing.T) { 2042 t.Parallel() 2043 2044 state := testStateStore(t) 2045 var jobs []*structs.Job 2046 2047 for i := 0; i < 10; i++ { 2048 job := mock.Job() 2049 jobs = append(jobs, job) 2050 2051 err := state.UpsertJob(1000+uint64(i), job) 2052 if err != nil { 2053 t.Fatalf("err: %v", err) 2054 } 2055 } 2056 2057 ws := memdb.NewWatchSet() 2058 iter, err := state.Jobs(ws) 2059 if err != nil { 2060 t.Fatalf("err: %v", err) 2061 } 2062 2063 var out []*structs.Job 2064 for { 2065 raw := iter.Next() 2066 if raw == nil { 2067 break 2068 } 2069 out = append(out, raw.(*structs.Job)) 2070 } 2071 2072 sort.Sort(JobIDSort(jobs)) 2073 sort.Sort(JobIDSort(out)) 2074 2075 if !reflect.DeepEqual(jobs, out) { 2076 t.Fatalf("bad: %#v %#v", jobs, out) 2077 } 2078 if watchFired(ws) { 2079 t.Fatalf("bad") 2080 } 2081 } 2082 2083 func TestStateStore_JobVersions(t *testing.T) { 2084 t.Parallel() 2085 2086 state := testStateStore(t) 2087 var jobs []*structs.Job 2088 2089 for i := 0; i < 10; i++ { 2090 job := mock.Job() 2091 jobs = append(jobs, job) 2092 2093 err := state.UpsertJob(1000+uint64(i), job) 2094 if err != nil { 2095 t.Fatalf("err: %v", err) 2096 } 2097 } 2098 2099 ws := memdb.NewWatchSet() 2100 iter, err := state.JobVersions(ws) 2101 if err != nil { 2102 t.Fatalf("err: %v", err) 2103 } 2104 2105 var out []*structs.Job 2106 for { 2107 raw := iter.Next() 2108 if raw == nil { 2109 break 2110 } 2111 out = 
append(out, raw.(*structs.Job)) 2112 } 2113 2114 sort.Sort(JobIDSort(jobs)) 2115 sort.Sort(JobIDSort(out)) 2116 2117 if !reflect.DeepEqual(jobs, out) { 2118 t.Fatalf("bad: %#v %#v", jobs, out) 2119 } 2120 if watchFired(ws) { 2121 t.Fatalf("bad") 2122 } 2123 } 2124 2125 func TestStateStore_JobsByIDPrefix(t *testing.T) { 2126 t.Parallel() 2127 2128 state := testStateStore(t) 2129 job := mock.Job() 2130 2131 job.ID = "redis" 2132 err := state.UpsertJob(1000, job) 2133 if err != nil { 2134 t.Fatalf("err: %v", err) 2135 } 2136 2137 ws := memdb.NewWatchSet() 2138 iter, err := state.JobsByIDPrefix(ws, job.Namespace, job.ID) 2139 if err != nil { 2140 t.Fatalf("err: %v", err) 2141 } 2142 2143 gatherJobs := func(iter memdb.ResultIterator) []*structs.Job { 2144 var jobs []*structs.Job 2145 for { 2146 raw := iter.Next() 2147 if raw == nil { 2148 break 2149 } 2150 jobs = append(jobs, raw.(*structs.Job)) 2151 } 2152 return jobs 2153 } 2154 2155 jobs := gatherJobs(iter) 2156 if len(jobs) != 1 { 2157 t.Fatalf("err: %v", err) 2158 } 2159 2160 iter, err = state.JobsByIDPrefix(ws, job.Namespace, "re") 2161 if err != nil { 2162 t.Fatalf("err: %v", err) 2163 } 2164 2165 jobs = gatherJobs(iter) 2166 if len(jobs) != 1 { 2167 t.Fatalf("err: %v", err) 2168 } 2169 if watchFired(ws) { 2170 t.Fatalf("bad") 2171 } 2172 2173 job = mock.Job() 2174 job.ID = "riak" 2175 err = state.UpsertJob(1001, job) 2176 if err != nil { 2177 t.Fatalf("err: %v", err) 2178 } 2179 2180 if !watchFired(ws) { 2181 t.Fatalf("bad") 2182 } 2183 2184 ws = memdb.NewWatchSet() 2185 iter, err = state.JobsByIDPrefix(ws, job.Namespace, "r") 2186 if err != nil { 2187 t.Fatalf("err: %v", err) 2188 } 2189 2190 jobs = gatherJobs(iter) 2191 if len(jobs) != 2 { 2192 t.Fatalf("err: %v", err) 2193 } 2194 2195 iter, err = state.JobsByIDPrefix(ws, job.Namespace, "ri") 2196 if err != nil { 2197 t.Fatalf("err: %v", err) 2198 } 2199 2200 jobs = gatherJobs(iter) 2201 if len(jobs) != 1 { 2202 t.Fatalf("err: %v", err) 2203 } 2204 if 
watchFired(ws) { 2205 t.Fatalf("bad") 2206 } 2207 } 2208 2209 func TestStateStore_JobsByPeriodic(t *testing.T) { 2210 t.Parallel() 2211 2212 state := testStateStore(t) 2213 var periodic, nonPeriodic []*structs.Job 2214 2215 for i := 0; i < 10; i++ { 2216 job := mock.Job() 2217 nonPeriodic = append(nonPeriodic, job) 2218 2219 err := state.UpsertJob(1000+uint64(i), job) 2220 if err != nil { 2221 t.Fatalf("err: %v", err) 2222 } 2223 } 2224 2225 for i := 0; i < 10; i++ { 2226 job := mock.PeriodicJob() 2227 periodic = append(periodic, job) 2228 2229 err := state.UpsertJob(2000+uint64(i), job) 2230 if err != nil { 2231 t.Fatalf("err: %v", err) 2232 } 2233 } 2234 2235 ws := memdb.NewWatchSet() 2236 iter, err := state.JobsByPeriodic(ws, true) 2237 if err != nil { 2238 t.Fatalf("err: %v", err) 2239 } 2240 2241 var outPeriodic []*structs.Job 2242 for { 2243 raw := iter.Next() 2244 if raw == nil { 2245 break 2246 } 2247 outPeriodic = append(outPeriodic, raw.(*structs.Job)) 2248 } 2249 2250 iter, err = state.JobsByPeriodic(ws, false) 2251 if err != nil { 2252 t.Fatalf("err: %v", err) 2253 } 2254 2255 var outNonPeriodic []*structs.Job 2256 for { 2257 raw := iter.Next() 2258 if raw == nil { 2259 break 2260 } 2261 outNonPeriodic = append(outNonPeriodic, raw.(*structs.Job)) 2262 } 2263 2264 sort.Sort(JobIDSort(periodic)) 2265 sort.Sort(JobIDSort(nonPeriodic)) 2266 sort.Sort(JobIDSort(outPeriodic)) 2267 sort.Sort(JobIDSort(outNonPeriodic)) 2268 2269 if !reflect.DeepEqual(periodic, outPeriodic) { 2270 t.Fatalf("bad: %#v %#v", periodic, outPeriodic) 2271 } 2272 2273 if !reflect.DeepEqual(nonPeriodic, outNonPeriodic) { 2274 t.Fatalf("bad: %#v %#v", nonPeriodic, outNonPeriodic) 2275 } 2276 if watchFired(ws) { 2277 t.Fatalf("bad") 2278 } 2279 } 2280 2281 func TestStateStore_JobsByScheduler(t *testing.T) { 2282 t.Parallel() 2283 2284 state := testStateStore(t) 2285 var serviceJobs []*structs.Job 2286 var sysJobs []*structs.Job 2287 2288 for i := 0; i < 10; i++ { 2289 job := mock.Job() 
2290 serviceJobs = append(serviceJobs, job) 2291 2292 err := state.UpsertJob(1000+uint64(i), job) 2293 if err != nil { 2294 t.Fatalf("err: %v", err) 2295 } 2296 } 2297 2298 for i := 0; i < 10; i++ { 2299 job := mock.SystemJob() 2300 job.Status = structs.JobStatusRunning 2301 sysJobs = append(sysJobs, job) 2302 2303 err := state.UpsertJob(2000+uint64(i), job) 2304 if err != nil { 2305 t.Fatalf("err: %v", err) 2306 } 2307 } 2308 2309 ws := memdb.NewWatchSet() 2310 iter, err := state.JobsByScheduler(ws, "service") 2311 if err != nil { 2312 t.Fatalf("err: %v", err) 2313 } 2314 2315 var outService []*structs.Job 2316 for { 2317 raw := iter.Next() 2318 if raw == nil { 2319 break 2320 } 2321 outService = append(outService, raw.(*structs.Job)) 2322 } 2323 2324 iter, err = state.JobsByScheduler(ws, "system") 2325 if err != nil { 2326 t.Fatalf("err: %v", err) 2327 } 2328 2329 var outSystem []*structs.Job 2330 for { 2331 raw := iter.Next() 2332 if raw == nil { 2333 break 2334 } 2335 outSystem = append(outSystem, raw.(*structs.Job)) 2336 } 2337 2338 sort.Sort(JobIDSort(serviceJobs)) 2339 sort.Sort(JobIDSort(sysJobs)) 2340 sort.Sort(JobIDSort(outService)) 2341 sort.Sort(JobIDSort(outSystem)) 2342 2343 if !reflect.DeepEqual(serviceJobs, outService) { 2344 t.Fatalf("bad: %#v %#v", serviceJobs, outService) 2345 } 2346 2347 if !reflect.DeepEqual(sysJobs, outSystem) { 2348 t.Fatalf("bad: %#v %#v", sysJobs, outSystem) 2349 } 2350 if watchFired(ws) { 2351 t.Fatalf("bad") 2352 } 2353 } 2354 2355 func TestStateStore_JobsByGC(t *testing.T) { 2356 t.Parallel() 2357 2358 state := testStateStore(t) 2359 gc, nonGc := make(map[string]struct{}), make(map[string]struct{}) 2360 2361 for i := 0; i < 20; i++ { 2362 var job *structs.Job 2363 if i%2 == 0 { 2364 job = mock.Job() 2365 } else { 2366 job = mock.PeriodicJob() 2367 } 2368 nonGc[job.ID] = struct{}{} 2369 2370 if err := state.UpsertJob(1000+uint64(i), job); err != nil { 2371 t.Fatalf("err: %v", err) 2372 } 2373 } 2374 2375 for i := 0; i < 
20; i += 2 {
		job := mock.Job()
		job.Type = structs.JobTypeBatch
		gc[job.ID] = struct{}{}

		if err := state.UpsertJob(2000+uint64(i), job); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Create an eval for it
		eval := mock.Eval()
		eval.JobID = job.ID
		eval.Status = structs.EvalStatusComplete
		if err := state.UpsertEvals(2000+uint64(i+1), []*structs.Evaluation{eval}); err != nil {
			t.Fatalf("err: %v", err)
		}

	}

	ws := memdb.NewWatchSet()
	iter, err := state.JobsByGC(ws, true)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	outGc := make(map[string]struct{})
	for i := iter.Next(); i != nil; i = iter.Next() {
		j := i.(*structs.Job)
		outGc[j.ID] = struct{}{}
	}

	iter, err = state.JobsByGC(ws, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	outNonGc := make(map[string]struct{})
	for i := iter.Next(); i != nil; i = iter.Next() {
		j := i.(*structs.Job)
		outNonGc[j.ID] = struct{}{}
	}

	if !reflect.DeepEqual(gc, outGc) {
		t.Fatalf("bad: %#v %#v", gc, outGc)
	}

	if !reflect.DeepEqual(nonGc, outNonGc) {
		t.Fatalf("bad: %#v %#v", nonGc, outNonGc)
	}
	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_RestoreJob verifies that a job written through the snapshot
// restore path can be read back out of the state store unchanged.
func TestStateStore_RestoreJob(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.JobRestore(job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, job) {
		t.Fatalf("Bad: %#v %#v", out, job)
	}
}

// TestStateStore_UpsertPeriodicLaunch inserts a new periodic launch record
// and checks the stored indexes plus blocking-query watch behavior.
func TestStateStore_UpsertPeriodicLaunch(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()
	launch := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    time.Now(),
	}

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err := state.UpsertPeriodicLaunch(1000, launch)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}

	// NOTE(review): the comparison is against launch but the failure message
	// prints job — presumably a copy/paste slip; confirm before changing.
	if !reflect.DeepEqual(launch, out) {
		t.Fatalf("bad: %#v %#v", job, out)
	}

	index, err := state.Index("periodic_launch")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpdateUpsertPeriodicLaunch updates an existing periodic
// launch record and checks create/modify index handling and watch firing.
func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()
	launch := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    time.Now(),
	}

	err := state.UpsertPeriodicLaunch(1000, launch)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	launch2 := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    launch.Launch.Add(1 * time.Second),
	}
	err = state.UpsertPeriodicLaunch(1001, launch2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1001 {
		t.Fatalf("bad: %#v", out)
	}

	if !reflect.DeepEqual(launch2, out) {
		t.Fatalf("bad: %#v %#v", launch2, out)
	}

	index, err := state.Index("periodic_launch")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_DeletePeriodicLaunch deletes a periodic launch record and
// checks that it is gone, the table index advances, and the watch fires.
func TestStateStore_DeletePeriodicLaunch(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()
	launch := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    time.Now(),
	}

	err := state.UpsertPeriodicLaunch(1000, launch)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a watchset so we can test that delete fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.PeriodicLaunchByID(ws, job.Namespace, launch.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err = state.DeletePeriodicLaunch(1001, launch.Namespace, launch.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out != nil {
		t.Fatalf("bad: %#v %#v", job, out)
	}

	index, err := state.Index("periodic_launch")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_PeriodicLaunches inserts ten launch records and verifies the
// full-table iterator returns each exactly once.
func TestStateStore_PeriodicLaunches(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	var launches []*structs.PeriodicLaunch

	for i := 0; i < 10; i++ {
		job := mock.Job()
		launch := &structs.PeriodicLaunch{
			ID:        job.ID,
			Namespace: job.Namespace,
			Launch:    time.Now(),
		}
		launches = append(launches, launch)

		err := state.UpsertPeriodicLaunch(1000+uint64(i), launch)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	ws := memdb.NewWatchSet()
	iter, err := state.PeriodicLaunches(ws)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out := make(map[string]*structs.PeriodicLaunch, 10)
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		launch := raw.(*structs.PeriodicLaunch)
		if _, ok := out[launch.ID]; ok {
			t.Fatalf("duplicate: %v", launch.ID)
		}

		out[launch.ID] = launch
	}

	for _, launch := range launches {
		l, ok := out[launch.ID]
		if !ok {
			t.Fatalf("bad %v", launch.ID)
		}

		if !reflect.DeepEqual(launch, l) {
			t.Fatalf("bad: %#v %#v", launch, l)
		}

		delete(out, launch.ID)
	}

	if len(out) != 0 {
		t.Fatalf("leftover: %#v", out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_RestorePeriodicLaunch verifies a periodic launch written
// through the snapshot restore path can be read back out unchanged.
func TestStateStore_RestorePeriodicLaunch(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()
	launch := &structs.PeriodicLaunch{
		ID:        job.ID,
		Namespace: job.Namespace,
		Launch:    time.Now(),
	}

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.PeriodicLaunchRestore(launch)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	ws :=
memdb.NewWatchSet()
	out, err := state.PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, launch) {
		t.Fatalf("Bad: %#v %#v", out, job)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_RestoreJobVersion verifies a job version written through the
// snapshot restore path can be read back by (namespace, id, version).
func TestStateStore_RestoreJobVersion(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.JobVersionRestore(job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	ws := memdb.NewWatchSet()
	out, err := state.JobByIDAndVersion(ws, job.Namespace, job.ID, job.Version)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, job) {
		t.Fatalf("Bad: %#v %#v", out, job)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_RestoreDeployment verifies a deployment written through the
// snapshot restore path can be read back out unchanged.
func TestStateStore_RestoreDeployment(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	d := mock.Deployment()

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.DeploymentRestore(d)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	ws := memdb.NewWatchSet()
	out, err := state.DeploymentByID(ws, d.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, d) {
		t.Fatalf("Bad: %#v %#v", out, d)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_RestoreJobSummary verifies a job summary written through the
// snapshot restore path can be read back out unchanged.
func TestStateStore_RestoreJobSummary(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()
	jobSummary := &structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Starting: 10,
			},
		},
	}
	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.JobSummaryRestore(jobSummary)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	ws := memdb.NewWatchSet()
	out, err := state.JobSummaryByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, jobSummary) {
		t.Fatalf("Bad: %#v %#v", out, jobSummary)
	}
}

// TestStateStore_CSIVolume checks register, list and deregister for csi_volumes
func TestStateStore_CSIVolume(t *testing.T) {
	state := testStateStore(t)
	index := uint64(1000)

	// Volume IDs
	vol0, vol1 := uuid.Generate(), uuid.Generate()

	// Create a node running a healthy instance of the plugin
	node := mock.Node()
	pluginID := "minnie"
	alloc := mock.Alloc()
	alloc.DesiredStatus = "run"
	alloc.ClientStatus = "running"
	alloc.NodeID = node.ID
	alloc.Job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
		"foo": {
			Name:   "foo",
			Source: vol0,
			Type:   "csi",
		},
	}

	node.CSINodePlugins = map[string]*structs.CSIInfo{
		pluginID: {
			PluginID:                 pluginID,
			AllocID:                  alloc.ID,
			Healthy:                  true,
			HealthDescription:        "healthy",
			RequiresControllerPlugin: false,
			RequiresTopologies:       false,
			NodeInfo: &structs.CSINodeInfo{
				ID:                      node.ID,
				MaxVolumes:              64,
				RequiresNodeStageVolume: true,
			},
		},
	}

	index++
	err := state.UpsertNode(index, node)
	require.NoError(t, err)
	defer state.DeleteNode(9999, []string{pluginID})

	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{alloc})
	require.NoError(t, err)

	ns := structs.DefaultNamespace

	v0 := structs.NewCSIVolume("foo", index)
	v0.ID = vol0
	v0.Namespace = ns
	v0.PluginID = "minnie"
	v0.Schedulable = true
	v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
	v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem

	index++
	v1 := structs.NewCSIVolume("foo", index)
	v1.ID = vol1
	v1.Namespace = ns
	v1.PluginID = "adam"
	v1.Schedulable = true
	v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
	v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem

	index++
	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1})
	require.NoError(t, err)

	// volume registration is idempotent, unless identities are changed
	index++
	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1})
	require.NoError(t, err)

	// NOTE(review): require.Error's extra args are a failure message, not an
	// asserted error string; require.EqualError would assert the message.
	index++
	v2 := v0.Copy()
	v2.PluginID = "new-id"
	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v2})
	require.Error(t, err, fmt.Sprintf("volume exists: %s", v0.ID))

	ws := memdb.NewWatchSet()
	iter, err := state.CSIVolumesByNamespace(ws, ns)
	require.NoError(t, err)

	// slurp drains a result iterator into a slice of volumes.
	slurp := func(iter memdb.ResultIterator) (vs []*structs.CSIVolume) {
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			vol := raw.(*structs.CSIVolume)
			vs = append(vs, vol)
		}
		return vs
	}

	vs := slurp(iter)
	require.Equal(t, 2, len(vs))

	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie")
	require.NoError(t, err)
	vs = slurp(iter)
	require.Equal(t, 1, len(vs))

	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByNodeID(ws, node.ID)
	require.NoError(t, err)
	vs = slurp(iter)
	require.Equal(t, 1, len(vs))

	// Allocs
	a0 := mock.Alloc()
	a1 := mock.Alloc()
	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{a0, a1})
	require.NoError(t, err)

	// Claims
	r := structs.CSIVolumeClaimRead
	w := structs.CSIVolumeClaimWrite
	u := structs.CSIVolumeClaimRelease
	claim0 := &structs.CSIVolumeClaim{
		AllocationID: a0.ID,
		NodeID:       node.ID,
		Mode:         r,
	}
	claim1 := &structs.CSIVolumeClaim{
		AllocationID: a1.ID,
		NodeID:       node.ID,
		Mode:         w,
	}

	index++
	err = state.CSIVolumeClaim(index, ns, vol0, claim0)
	require.NoError(t, err)
	index++
	err = state.CSIVolumeClaim(index, ns, vol0, claim1)
	require.NoError(t, err)

	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie")
	require.NoError(t, err)
	vs = slurp(iter)
	require.False(t, vs[0].WriteFreeClaims())

	// NOTE(review): this claim uses a raw index 2 rather than the running
	// index counter — looks like an oversight; verify before changing.
	claim0.Mode = u
	err = state.CSIVolumeClaim(2, ns, vol0, claim0)
	require.NoError(t, err)
	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie")
	require.NoError(t, err)
	vs = slurp(iter)
	require.True(t, vs[0].ReadSchedulable())

	// registration is an error when the volume is in use
	index++
	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0})
	require.Error(t, err, fmt.Sprintf("volume exists: %s", vol0))
	// as is deregistration
	index++
	err = state.CSIVolumeDeregister(index, ns, []string{vol0})
	require.Error(t, err, fmt.Sprintf("volume in use: %s", vol0))

	// release claims to unblock deregister
	index++
	claim0.State = structs.CSIVolumeClaimStateReadyToFree
	err = state.CSIVolumeClaim(index, ns, vol0, claim0)
	require.NoError(t, err)
	index++
	claim1.Mode = u
	claim1.State = structs.CSIVolumeClaimStateReadyToFree
	err = state.CSIVolumeClaim(index, ns, vol0, claim1)
	require.NoError(t, err)

	index++
	err = state.CSIVolumeDeregister(index, ns, []string{vol0})
	require.NoError(t, err)

	// List, now omitting the deregistered volume
	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByPluginID(ws,
ns, "minnie")
	require.NoError(t, err)
	vs = slurp(iter)
	require.Equal(t, 0, len(vs))

	ws = memdb.NewWatchSet()
	iter, err = state.CSIVolumesByNamespace(ws, ns)
	require.NoError(t, err)
	vs = slurp(iter)
	require.Equal(t, 1, len(vs))
}

// TestStateStore_CSIPluginNodes uses node fingerprinting to create a plugin and update health
func TestStateStore_CSIPluginNodes(t *testing.T) {
	index := uint64(999)
	state := testStateStore(t)
	ws := memdb.NewWatchSet()
	plugID := "foo"

	// Create Nomad client Nodes
	ns := []*structs.Node{mock.Node(), mock.Node()}
	for _, n := range ns {
		index++
		err := state.UpsertNode(index, n)
		require.NoError(t, err)
	}

	// Fingerprint a running controller plugin
	n0, _ := state.NodeByID(ws, ns[0].ID)
	n0.CSIControllerPlugins = map[string]*structs.CSIInfo{
		plugID: {
			PluginID:                 plugID,
			Healthy:                  true,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			RequiresTopologies:       false,
			ControllerInfo: &structs.CSIControllerInfo{
				SupportsReadOnlyAttach: true,
				SupportsListVolumes:    true,
			},
		},
	}
	index++
	err := state.UpsertNode(index, n0)
	require.NoError(t, err)

	// Fingerprint two running node plugins
	for _, n := range ns[:] {
		n, _ := state.NodeByID(ws, n.ID)
		n.CSINodePlugins = map[string]*structs.CSIInfo{
			plugID: {
				PluginID:                 plugID,
				Healthy:                  true,
				UpdateTime:               time.Now(),
				RequiresControllerPlugin: true,
				RequiresTopologies:       false,
				NodeInfo:                 &structs.CSINodeInfo{},
			},
		}
		index++
		err = state.UpsertNode(index, n)
		require.NoError(t, err)
	}

	plug, err := state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.True(t, plug.ControllerRequired)
	require.Equal(t, 1, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 2, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 1, len(plug.Controllers), "controllers expected")
	require.Equal(t, 2, len(plug.Nodes), "nodes expected")

	// Volume using the plugin
	index++
	vol := &structs.CSIVolume{
		ID:        uuid.Generate(),
		Namespace: structs.DefaultNamespace,
		PluginID:  plugID,
	}
	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{vol})
	require.NoError(t, err)

	vol, err = state.CSIVolumeByID(ws, structs.DefaultNamespace, vol.ID)
	require.NoError(t, err)
	require.True(t, vol.Schedulable, "volume should be schedulable")

	// Controller is unhealthy
	n0, _ = state.NodeByID(ws, ns[0].ID)
	n0.CSIControllerPlugins = map[string]*structs.CSIInfo{
		plugID: {
			PluginID:                 plugID,
			Healthy:                  false,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			RequiresTopologies:       false,
			ControllerInfo: &structs.CSIControllerInfo{
				SupportsReadOnlyAttach: true,
				SupportsListVolumes:    true,
			},
		},
	}

	index++
	err = state.UpsertNode(index, n0)
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Equal(t, 0, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 2, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 1, len(plug.Controllers), "controllers expected")
	require.Equal(t, 2, len(plug.Nodes), "nodes expected")

	vol, err = state.CSIVolumeByID(ws, structs.DefaultNamespace, vol.ID)
	require.NoError(t, err)
	require.False(t, vol.Schedulable, "volume should not be schedulable")

	// Node plugin is removed
	n1, _ := state.NodeByID(ws, ns[1].ID)
	n1.CSINodePlugins = map[string]*structs.CSIInfo{}
	index++
	err = state.UpsertNode(index, n1)
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Equal(t, 0, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 1, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 1, len(plug.Controllers), "controllers expected")
	require.Equal(t, 1, len(plug.Nodes), "nodes expected")

	// Last node plugin is removed
	n0, _ = state.NodeByID(ws, ns[0].ID)
	n0.CSINodePlugins = map[string]*structs.CSIInfo{}
	index++
	err = state.UpsertNode(index, n0)
	require.NoError(t, err)

	// Nodes plugins should be gone but controllers left
	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Equal(t, 0, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 0, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 1, len(plug.Controllers), "controllers expected")
	require.Equal(t, 0, len(plug.Nodes), "nodes expected")

	// A node plugin is restored
	n0, _ = state.NodeByID(ws, n0.ID)
	n0.CSINodePlugins = map[string]*structs.CSIInfo{
		plugID: {
			PluginID:                 plugID,
			Healthy:                  true,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			RequiresTopologies:       false,
			NodeInfo:                 &structs.CSINodeInfo{},
		},
	}
	index++
	err = state.UpsertNode(index, n0)
	require.NoError(t, err)

	// Nodes plugin should be replaced and healthy
	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Equal(t, 0, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 1, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 1, len(plug.Controllers), "controllers expected")
	require.Equal(t, 1, len(plug.Nodes), "nodes expected")

	// Remove node again
	n0, _ = state.NodeByID(ws, ns[0].ID)
	n0.CSINodePlugins = map[string]*structs.CSIInfo{}
	index++
	err = state.UpsertNode(index, n0)
	require.NoError(t, err)

	// Nodes plugins should be gone but controllers left
	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Equal(t, 0, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 0, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 1, len(plug.Controllers), "controllers expected")
	require.Equal(t, 0, len(plug.Nodes), "nodes expected")

	// controller is removed
	n0, _ = state.NodeByID(ws, ns[0].ID)
	n0.CSIControllerPlugins = map[string]*structs.CSIInfo{}
	index++
	err = state.UpsertNode(index, n0)
	require.NoError(t, err)

	// Plugin has been removed entirely
	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Nil(t, plug)

	// Volume still exists and is safe to query, but unschedulable
	vol, err = state.CSIVolumeByID(ws, structs.DefaultNamespace, vol.ID)
	require.NoError(t, err)
	require.False(t, vol.Schedulable)
}

// TestStateStore_CSIPluginAllocUpdates tests the ordering
// interactions for CSI plugins between Nomad client node updates and
// allocation updates.
func TestStateStore_CSIPluginAllocUpdates(t *testing.T) {
	t.Parallel()
	index := uint64(999)
	state := testStateStore(t)
	ws := memdb.NewWatchSet()

	n := mock.Node()
	index++
	err := state.UpsertNode(index, n)
	require.NoError(t, err)

	// (1) unhealthy fingerprint, then terminal alloc, then healthy node update
	plugID0 := "foo0"

	alloc0 := mock.Alloc()
	alloc0.NodeID = n.ID
	alloc0.DesiredStatus = "run"
	alloc0.ClientStatus = "running"
	alloc0.Job.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ID: plugID0}
	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{alloc0})
	require.NoError(t, err)

	n, _ = state.NodeByID(ws, n.ID)
	n.CSINodePlugins = map[string]*structs.CSIInfo{
		plugID0: {
			PluginID:                 plugID0,
			AllocID:                  alloc0.ID,
			Healthy:                  false,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			NodeInfo:                 &structs.CSINodeInfo{},
		},
	}
	index++
	err = state.UpsertNode(index, n)
	require.NoError(t, err)

	plug, err := state.CSIPluginByID(ws, plugID0)
	require.NoError(t, err)
	require.Nil(t, plug, "no plugin should exist: not yet healthy")

	alloc0.DesiredStatus = "stopped"
	alloc0.ClientStatus = "complete"
	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{alloc0})
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID0)
	require.NoError(t, err)
	require.Nil(t, plug, "no plugin should exist: allocs never healthy")

	n, _ = state.NodeByID(ws, n.ID)
	n.CSINodePlugins[plugID0].Healthy = true
	n.CSINodePlugins[plugID0].UpdateTime = time.Now()
	index++
	err = state.UpsertNode(index, n)
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID0)
	require.NoError(t, err)
	require.NotNil(t, plug, "plugin should exist")

	// (2) healthy fingerprint, then terminal alloc update
	plugID1 := "foo1"

	alloc1 := mock.Alloc()
	n, _ = state.NodeByID(ws, n.ID)
	n.CSINodePlugins = map[string]*structs.CSIInfo{
		plugID1: {
			PluginID:                 plugID1,
			AllocID:                  alloc1.ID,
			Healthy:                  true,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			NodeInfo:                 &structs.CSINodeInfo{},
		},
	}
	index++
	err = state.UpsertNode(index, n)
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID1)
	require.NoError(t, err)
	require.NotNil(t, plug, "plugin should exist")

	alloc1.NodeID = n.ID
	alloc1.DesiredStatus = "stop"
	alloc1.ClientStatus = "complete"
	alloc1.Job.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ID: plugID1}
	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{alloc1})
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID1)
	require.NoError(t, err)
	require.Nil(t, plug, "no plugin should exist: alloc became terminal")

	// (3) terminal alloc update, then unhealthy fingerprint
	plugID2 := "foo2"

	alloc2 := mock.Alloc()
	alloc2.NodeID = n.ID
	alloc2.DesiredStatus = "stop"
	alloc2.ClientStatus = "complete"
	alloc2.Job.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ID: plugID2}
	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{alloc2})
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID2)
	require.NoError(t, err)
	require.Nil(t, plug, "no plugin should exist: alloc became terminal")

	n, _ = state.NodeByID(ws, n.ID)
	n.CSINodePlugins = map[string]*structs.CSIInfo{
		plugID2: {
			PluginID:                 plugID2,
			AllocID:                  alloc2.ID,
			Healthy:                  false,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			NodeInfo:                 &structs.CSINodeInfo{},
		},
	}
	index++
	err = state.UpsertNode(index, n)
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID2)
	require.NoError(t, err)
	require.Nil(t, plug, "plugin should not exist: never became healthy")

}

// TestStateStore_CSIPluginMultiNodeUpdates tests the ordering
// interactions for CSI plugins between Nomad client node updates and
// allocation updates when multiple nodes are involved
func TestStateStore_CSIPluginMultiNodeUpdates(t *testing.T) {
	t.Parallel()
	index := uint64(999)
	state := testStateStore(t)
	ws := memdb.NewWatchSet()

	var err error

	// Create Nomad client Nodes
	ns := []*structs.Node{mock.Node(), mock.Node()}
	for _, n := range ns {
		index++
		err = state.UpsertNode(index, n)
		require.NoError(t, err)
	}

	plugID := "foo"
	plugCfg := &structs.TaskCSIPluginConfig{ID: plugID}

	// Fingerprint two running node plugins and their allocs; we'll
	// leave these in place for the test to ensure we don't GC the
	// plugin
	for _, n := range ns[:] {
		nAlloc := mock.Alloc()
		n, _ := state.NodeByID(ws, n.ID)
		n.CSINodePlugins = map[string]*structs.CSIInfo{
			plugID: {
				PluginID:                 plugID,
				AllocID:                  nAlloc.ID,
				Healthy:                  true,
				UpdateTime:               time.Now(),
				RequiresControllerPlugin: true,
				RequiresTopologies:       false,
				NodeInfo:                 &structs.CSINodeInfo{},
			},
		}
		index++
		err = state.UpsertNode(index, n)
		require.NoError(t, err)

		nAlloc.NodeID = n.ID
		nAlloc.DesiredStatus = "run"
		nAlloc.ClientStatus = "running"
		nAlloc.Job.TaskGroups[0].Tasks[0].CSIPluginConfig = plugCfg

		index++
		err = state.UpsertAllocs(index, []*structs.Allocation{nAlloc})
		require.NoError(t, err)
	}

	// Fingerprint a running controller plugin
	alloc0 := mock.Alloc()
	n0, _ := state.NodeByID(ws, ns[0].ID)
	n0.CSIControllerPlugins = map[string]*structs.CSIInfo{
		plugID: {
			PluginID:                 plugID,
			AllocID:                  alloc0.ID,
			Healthy:                  true,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			RequiresTopologies:       false,
			ControllerInfo: &structs.CSIControllerInfo{
				SupportsReadOnlyAttach: true,
				SupportsListVolumes:    true,
			},
		},
	}
	index++
	err = state.UpsertNode(index, n0)
	require.NoError(t, err)

	plug, err := state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Equal(t, 1, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 1, len(plug.Controllers), "controllers expected")
	require.Equal(t, 2, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 2, len(plug.Nodes), "nodes expected")

	n1, _ := state.NodeByID(ws, ns[1].ID)

	alloc0.NodeID = n0.ID
	alloc0.DesiredStatus = "stop"
	alloc0.ClientStatus = "complete"
	alloc0.Job.TaskGroups[0].Tasks[0].CSIPluginConfig = plugCfg

	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{alloc0})
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Equal(t, 0, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 0, len(plug.Controllers), "controllers expected")
	require.Equal(t, 2, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 2, len(plug.Nodes), "nodes expected")

	alloc1 := mock.Alloc()
	alloc1.NodeID = n1.ID
	alloc1.DesiredStatus = "run"
	alloc1.ClientStatus = "running"
	alloc1.Job.TaskGroups[0].Tasks[0].CSIPluginConfig = plugCfg

	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{alloc1})
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.Equal(t, 0, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 0, len(plug.Controllers), "controllers expected")
	require.Equal(t, 2, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 2, len(plug.Nodes), "nodes expected")

	n0, _ = state.NodeByID(ws, ns[0].ID)
	n0.CSIControllerPlugins = map[string]*structs.CSIInfo{
		plugID: {
			PluginID:                 plugID,
			AllocID:                  alloc0.ID,
			Healthy:                  false,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			RequiresTopologies:       false,
			ControllerInfo: &structs.CSIControllerInfo{
				SupportsReadOnlyAttach: true,
				SupportsListVolumes:    true,
			},
		},
	}
	index++
	err = state.UpsertNode(index, n0)
	require.NoError(t, err)

	n1.CSIControllerPlugins = map[string]*structs.CSIInfo{
		plugID: {
			PluginID:                 plugID,
			AllocID:                  alloc1.ID,
			Healthy:                  true,
			UpdateTime:               time.Now(),
			RequiresControllerPlugin: true,
			RequiresTopologies:       false,
			ControllerInfo: &structs.CSIControllerInfo{
				SupportsReadOnlyAttach: true,
				SupportsListVolumes:    true,
			},
		},
	}
	index++
	err = state.UpsertNode(index, n1)
	require.NoError(t, err)

	plug, err = state.CSIPluginByID(ws, plugID)
	require.NoError(t, err)
	require.True(t, plug.ControllerRequired)
	require.Equal(t, 1, plug.ControllersHealthy, "controllers healthy")
	require.Equal(t, 1, len(plug.Controllers), "controllers expected")
	require.Equal(t, 2, plug.NodesHealthy, "nodes healthy")
	require.Equal(t, 2, len(plug.Nodes), "nodes expected")

}

// TestStateStore_CSIPluginJobs verifies that deleting the controller and node
// jobs removes their plugin registrations, and that the plugin is collected
// once both are gone.
func TestStateStore_CSIPluginJobs(t *testing.T) {
	s := testStateStore(t)
	deleteNodes := CreateTestCSIPlugin(s, "foo")
	defer deleteNodes()

	index := uint64(1001)

	controllerJob := mock.Job()
	controllerJob.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{
		ID:   "foo",
		Type: structs.CSIPluginTypeController,
	}

	nodeJob := mock.Job()
	nodeJob.TaskGroups[0].Tasks[0].CSIPluginConfig =
&structs.TaskCSIPluginConfig{
		ID:   "foo",
		Type: structs.CSIPluginTypeNode,
	}

	err := s.UpsertJob(index, controllerJob)
	require.NoError(t, err)
	index++

	err = s.UpsertJob(index, nodeJob)
	require.NoError(t, err)
	index++

	// Get the plugin, and make better fake allocations for it
	ws := memdb.NewWatchSet()
	plug, err := s.CSIPluginByID(ws, "foo")
	require.NoError(t, err)
	index++

	as := []*structs.Allocation{}
	for id, info := range plug.Controllers {
		as = append(as, &structs.Allocation{
			ID:        info.AllocID,
			Namespace: controllerJob.Namespace,
			JobID:     controllerJob.ID,
			Job:       controllerJob,
			TaskGroup: "web",
			EvalID:    uuid.Generate(),
			NodeID:    id,
		})
	}
	for id, info := range plug.Nodes {
		as = append(as, &structs.Allocation{
			ID:        info.AllocID,
			JobID:     nodeJob.ID,
			Namespace: nodeJob.Namespace,
			Job:       nodeJob,
			TaskGroup: "web",
			EvalID:    uuid.Generate(),
			NodeID:    id,
		})
	}

	err = s.UpsertAllocs(index, as)
	require.NoError(t, err)
	index++

	// Delete a job
	err = s.DeleteJob(index, controllerJob.Namespace, controllerJob.ID)
	require.NoError(t, err)
	index++

	// plugin still exists
	plug, err = s.CSIPluginByID(ws, "foo")
	require.NoError(t, err)
	require.NotNil(t, plug)
	require.Equal(t, 0, len(plug.Controllers))

	// Delete a job
	err = s.DeleteJob(index, nodeJob.Namespace, nodeJob.ID)
	require.NoError(t, err)
	index++

	// plugin was collected
	plug, err = s.CSIPluginByID(ws, "foo")
	require.NoError(t, err)
	require.Nil(t, plug)
}

// TestStateStore_RestoreCSIPlugin verifies a CSI plugin written through the
// snapshot restore path can be read back out unchanged.
func TestStateStore_RestoreCSIPlugin(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	state := testStateStore(t)
	plugin := mock.CSIPlugin()

	restore, err := state.Restore()
	require.NoError(err)

	err = restore.CSIPluginRestore(plugin)
	require.NoError(err)
	restore.Commit()

	ws := memdb.NewWatchSet()
	out, err := state.CSIPluginByID(ws, plugin.ID)
	require.NoError(err)
	require.EqualValues(out, plugin)
}

// TestStateStore_RestoreCSIVolume verifies a CSI volume written through the
// snapshot restore path can be read back out unchanged.
func TestStateStore_RestoreCSIVolume(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	state := testStateStore(t)
	plugin := mock.CSIPlugin()
	volume := mock.CSIVolume(plugin)

	restore, err := state.Restore()
	require.NoError(err)

	err = restore.CSIVolumeRestore(volume)
	require.NoError(err)
	restore.Commit()

	ws := memdb.NewWatchSet()
	out, err := state.CSIVolumeByID(ws, "default", volume.ID)
	require.NoError(err)
	require.EqualValues(out, volume)
}

// TestStateStore_Indexes upserts a node and checks the index table contains
// the expected "nodes" entry at the upsert index.
func TestStateStore_Indexes(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	node := mock.Node()

	err := state.UpsertNode(1000, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	iter, err := state.Indexes()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out []*IndexEntry
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*IndexEntry))
	}

	expect := &IndexEntry{"nodes", 1000}
	if l := len(out); l < 1 {
		t.Fatalf("unexpected number of index entries: %v", pretty.Sprint(out))
	}

	for _, index := range out {
		if index.Key != expect.Key {
			continue
		}
		if index.Value != expect.Value {
			t.Fatalf("bad index; got %d; want %d", index.Value, expect.Value)
		}

		// We matched
		return
	}

	t.Fatal("did not find expected index entry")
}

// TestStateStore_LatestIndex checks LatestIndex reflects the highest index
// written across tables.
func TestStateStore_LatestIndex(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	if err := state.UpsertNode(1000, mock.Node()); err != nil {
		t.Fatalf("err: %v", err)
	}

	exp := uint64(2000)
	if err := state.UpsertJob(exp, mock.Job()); err != nil {
		t.Fatalf("err: %v", err)
	}

	latest, err := state.LatestIndex()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if latest != exp {
		t.Fatalf("LatestIndex() returned %d; want %d", latest, exp)
	}
}

// TestStateStore_RestoreIndex verifies an index entry written through the
// snapshot restore path can be read back out.
func TestStateStore_RestoreIndex(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	index := &IndexEntry{"jobs", 1000}
	err = restore.IndexRestore(index)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	restore.Commit()

	out, err := state.Index("jobs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out != 1000 {
		t.Fatalf("Bad: %#v %#v", out, 1000)
	}
}

// TestStateStore_UpsertEvals_Eval inserts a new evaluation and checks the
// stored copy, the "evals" index, and blocking-query watch behavior.
func TestStateStore_UpsertEvals_Eval(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	eval := mock.Eval()

	// Create a watchset so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.EvalByID(ws, eval.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err := state.UpsertEvals(1000, []*structs.Evaluation{eval})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(eval, out) {
		t.Fatalf("bad: %#v %#v", eval, out)
	}

	index, err := state.Index("evals")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_UpsertEvals_CancelBlocked checks that upserting a complete,
// successful eval for a job cancels that job's existing blocked evals.
func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	// Create two blocked evals for the same job
	j := "test-job"
	b1, b2 := mock.Eval(), mock.Eval()
	b1.JobID = j
	b1.Status = structs.EvalStatusBlocked
	b2.JobID = j
	b2.Status = structs.EvalStatusBlocked

	err := state.UpsertEvals(999, []*structs.Evaluation{b1, b2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create one complete and successful eval for the job
	eval := mock.Eval()
	eval.JobID = j
	eval.Status = structs.EvalStatusComplete

	// Create a watchset so we can test that the upsert of the complete eval
	// fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.EvalByID(ws, b1.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.EvalByID(ws, b2.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	if err := state.UpsertEvals(1000, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	out, err := state.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(eval, out) {
		t.Fatalf("bad: %#v %#v", eval, out)
	}

	index, err := state.Index("evals")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	// Get b1/b2 and check they are cancelled
	out1, err := state.EvalByID(ws, b1.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out2, err := state.EvalByID(ws, b2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out1.Status != structs.EvalStatusCancelled || out2.Status != structs.EvalStatusCancelled {
		t.Fatalf("bad: %#v %#v", out1, out2)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) {
3863 t.Parallel() 3864 3865 state := testStateStore(t) 3866 eval := mock.Eval() 3867 3868 err := state.UpsertEvals(1000, []*structs.Evaluation{eval}) 3869 if err != nil { 3870 t.Fatalf("err: %v", err) 3871 } 3872 3873 // Create a watchset so we can test that delete fires the watch 3874 ws := memdb.NewWatchSet() 3875 ws2 := memdb.NewWatchSet() 3876 if _, err := state.EvalByID(ws, eval.ID); err != nil { 3877 t.Fatalf("bad: %v", err) 3878 } 3879 3880 if _, err := state.EvalsByJob(ws2, eval.Namespace, eval.JobID); err != nil { 3881 t.Fatalf("bad: %v", err) 3882 } 3883 3884 eval2 := mock.Eval() 3885 eval2.ID = eval.ID 3886 eval2.JobID = eval.JobID 3887 err = state.UpsertEvals(1001, []*structs.Evaluation{eval2}) 3888 if err != nil { 3889 t.Fatalf("err: %v", err) 3890 } 3891 3892 if !watchFired(ws) { 3893 t.Fatalf("bad") 3894 } 3895 if !watchFired(ws2) { 3896 t.Fatalf("bad") 3897 } 3898 3899 ws = memdb.NewWatchSet() 3900 out, err := state.EvalByID(ws, eval.ID) 3901 if err != nil { 3902 t.Fatalf("err: %v", err) 3903 } 3904 3905 if !reflect.DeepEqual(eval2, out) { 3906 t.Fatalf("bad: %#v %#v", eval2, out) 3907 } 3908 3909 if out.CreateIndex != 1000 { 3910 t.Fatalf("bad: %#v", out) 3911 } 3912 if out.ModifyIndex != 1001 { 3913 t.Fatalf("bad: %#v", out) 3914 } 3915 3916 index, err := state.Index("evals") 3917 if err != nil { 3918 t.Fatalf("err: %v", err) 3919 } 3920 if index != 1001 { 3921 t.Fatalf("bad: %d", index) 3922 } 3923 3924 if watchFired(ws) { 3925 t.Fatalf("bad") 3926 } 3927 } 3928 3929 func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { 3930 t.Parallel() 3931 3932 state := testStateStore(t) 3933 3934 parent := mock.Job() 3935 if err := state.UpsertJob(998, parent); err != nil { 3936 t.Fatalf("err: %v", err) 3937 } 3938 3939 child := mock.Job() 3940 child.ParentID = parent.ID 3941 3942 if err := state.UpsertJob(999, child); err != nil { 3943 t.Fatalf("err: %v", err) 3944 } 3945 3946 eval := mock.Eval() 3947 eval.Status = structs.EvalStatusComplete 3948 
eval.JobID = child.ID 3949 3950 // Create watchsets so we can test that upsert fires the watch 3951 ws := memdb.NewWatchSet() 3952 ws2 := memdb.NewWatchSet() 3953 ws3 := memdb.NewWatchSet() 3954 if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { 3955 t.Fatalf("bad: %v", err) 3956 } 3957 if _, err := state.EvalByID(ws2, eval.ID); err != nil { 3958 t.Fatalf("bad: %v", err) 3959 } 3960 if _, err := state.EvalsByJob(ws3, eval.Namespace, eval.JobID); err != nil { 3961 t.Fatalf("bad: %v", err) 3962 } 3963 3964 err := state.UpsertEvals(1000, []*structs.Evaluation{eval}) 3965 if err != nil { 3966 t.Fatalf("err: %v", err) 3967 } 3968 3969 if !watchFired(ws) { 3970 t.Fatalf("bad") 3971 } 3972 if !watchFired(ws2) { 3973 t.Fatalf("bad") 3974 } 3975 if !watchFired(ws3) { 3976 t.Fatalf("bad") 3977 } 3978 3979 ws = memdb.NewWatchSet() 3980 out, err := state.EvalByID(ws, eval.ID) 3981 if err != nil { 3982 t.Fatalf("err: %v", err) 3983 } 3984 3985 if !reflect.DeepEqual(eval, out) { 3986 t.Fatalf("bad: %#v %#v", eval, out) 3987 } 3988 3989 index, err := state.Index("evals") 3990 if err != nil { 3991 t.Fatalf("err: %v", err) 3992 } 3993 if index != 1000 { 3994 t.Fatalf("bad: %d", index) 3995 } 3996 3997 summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) 3998 if err != nil { 3999 t.Fatalf("err: %v", err) 4000 } 4001 if summary == nil { 4002 t.Fatalf("nil summary") 4003 } 4004 if summary.JobID != parent.ID { 4005 t.Fatalf("bad summary id: %v", parent.ID) 4006 } 4007 if summary.Children == nil { 4008 t.Fatalf("nil children summary") 4009 } 4010 if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { 4011 t.Fatalf("bad children summary: %v", summary.Children) 4012 } 4013 4014 if watchFired(ws) { 4015 t.Fatalf("bad") 4016 } 4017 } 4018 4019 func TestStateStore_DeleteEval_Eval(t *testing.T) { 4020 t.Parallel() 4021 4022 state := testStateStore(t) 4023 eval1 := mock.Eval() 4024 eval2 := mock.Eval() 
4025 alloc1 := mock.Alloc() 4026 alloc2 := mock.Alloc() 4027 4028 // Create watchsets so we can test that upsert fires the watch 4029 watches := make([]memdb.WatchSet, 12) 4030 for i := 0; i < 12; i++ { 4031 watches[i] = memdb.NewWatchSet() 4032 } 4033 if _, err := state.EvalByID(watches[0], eval1.ID); err != nil { 4034 t.Fatalf("bad: %v", err) 4035 } 4036 if _, err := state.EvalByID(watches[1], eval2.ID); err != nil { 4037 t.Fatalf("bad: %v", err) 4038 } 4039 if _, err := state.EvalsByJob(watches[2], eval1.Namespace, eval1.JobID); err != nil { 4040 t.Fatalf("bad: %v", err) 4041 } 4042 if _, err := state.EvalsByJob(watches[3], eval2.Namespace, eval2.JobID); err != nil { 4043 t.Fatalf("bad: %v", err) 4044 } 4045 if _, err := state.AllocByID(watches[4], alloc1.ID); err != nil { 4046 t.Fatalf("bad: %v", err) 4047 } 4048 if _, err := state.AllocByID(watches[5], alloc2.ID); err != nil { 4049 t.Fatalf("bad: %v", err) 4050 } 4051 if _, err := state.AllocsByEval(watches[6], alloc1.EvalID); err != nil { 4052 t.Fatalf("bad: %v", err) 4053 } 4054 if _, err := state.AllocsByEval(watches[7], alloc2.EvalID); err != nil { 4055 t.Fatalf("bad: %v", err) 4056 } 4057 if _, err := state.AllocsByJob(watches[8], alloc1.Namespace, alloc1.JobID, false); err != nil { 4058 t.Fatalf("bad: %v", err) 4059 } 4060 if _, err := state.AllocsByJob(watches[9], alloc2.Namespace, alloc2.JobID, false); err != nil { 4061 t.Fatalf("bad: %v", err) 4062 } 4063 if _, err := state.AllocsByNode(watches[10], alloc1.NodeID); err != nil { 4064 t.Fatalf("bad: %v", err) 4065 } 4066 if _, err := state.AllocsByNode(watches[11], alloc2.NodeID); err != nil { 4067 t.Fatalf("bad: %v", err) 4068 } 4069 4070 state.UpsertJobSummary(900, mock.JobSummary(eval1.JobID)) 4071 state.UpsertJobSummary(901, mock.JobSummary(eval2.JobID)) 4072 state.UpsertJobSummary(902, mock.JobSummary(alloc1.JobID)) 4073 state.UpsertJobSummary(903, mock.JobSummary(alloc2.JobID)) 4074 err := state.UpsertEvals(1000, []*structs.Evaluation{eval1, 
eval2}) 4075 if err != nil { 4076 t.Fatalf("err: %v", err) 4077 } 4078 4079 err = state.UpsertAllocs(1001, []*structs.Allocation{alloc1, alloc2}) 4080 if err != nil { 4081 t.Fatalf("err: %v", err) 4082 } 4083 4084 err = state.DeleteEval(1002, []string{eval1.ID, eval2.ID}, []string{alloc1.ID, alloc2.ID}) 4085 if err != nil { 4086 t.Fatalf("err: %v", err) 4087 } 4088 4089 for i, ws := range watches { 4090 if !watchFired(ws) { 4091 t.Fatalf("bad %d", i) 4092 } 4093 } 4094 4095 ws := memdb.NewWatchSet() 4096 out, err := state.EvalByID(ws, eval1.ID) 4097 if err != nil { 4098 t.Fatalf("err: %v", err) 4099 } 4100 4101 if out != nil { 4102 t.Fatalf("bad: %#v %#v", eval1, out) 4103 } 4104 4105 out, err = state.EvalByID(ws, eval2.ID) 4106 if err != nil { 4107 t.Fatalf("err: %v", err) 4108 } 4109 4110 if out != nil { 4111 t.Fatalf("bad: %#v %#v", eval1, out) 4112 } 4113 4114 outA, err := state.AllocByID(ws, alloc1.ID) 4115 if err != nil { 4116 t.Fatalf("err: %v", err) 4117 } 4118 4119 if out != nil { 4120 t.Fatalf("bad: %#v %#v", alloc1, outA) 4121 } 4122 4123 outA, err = state.AllocByID(ws, alloc2.ID) 4124 if err != nil { 4125 t.Fatalf("err: %v", err) 4126 } 4127 4128 if out != nil { 4129 t.Fatalf("bad: %#v %#v", alloc1, outA) 4130 } 4131 4132 index, err := state.Index("evals") 4133 if err != nil { 4134 t.Fatalf("err: %v", err) 4135 } 4136 if index != 1002 { 4137 t.Fatalf("bad: %d", index) 4138 } 4139 4140 index, err = state.Index("allocs") 4141 if err != nil { 4142 t.Fatalf("err: %v", err) 4143 } 4144 if index != 1002 { 4145 t.Fatalf("bad: %d", index) 4146 } 4147 4148 if watchFired(ws) { 4149 t.Fatalf("bad") 4150 } 4151 } 4152 4153 func TestStateStore_DeleteEval_ChildJob(t *testing.T) { 4154 t.Parallel() 4155 4156 state := testStateStore(t) 4157 4158 parent := mock.Job() 4159 if err := state.UpsertJob(998, parent); err != nil { 4160 t.Fatalf("err: %v", err) 4161 } 4162 4163 child := mock.Job() 4164 child.ParentID = parent.ID 4165 4166 if err := state.UpsertJob(999, child); 
err != nil { 4167 t.Fatalf("err: %v", err) 4168 } 4169 4170 eval1 := mock.Eval() 4171 eval1.JobID = child.ID 4172 alloc1 := mock.Alloc() 4173 alloc1.JobID = child.ID 4174 4175 err := state.UpsertEvals(1000, []*structs.Evaluation{eval1}) 4176 if err != nil { 4177 t.Fatalf("err: %v", err) 4178 } 4179 4180 err = state.UpsertAllocs(1001, []*structs.Allocation{alloc1}) 4181 if err != nil { 4182 t.Fatalf("err: %v", err) 4183 } 4184 4185 // Create watchsets so we can test that delete fires the watch 4186 ws := memdb.NewWatchSet() 4187 if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { 4188 t.Fatalf("bad: %v", err) 4189 } 4190 4191 err = state.DeleteEval(1002, []string{eval1.ID}, []string{alloc1.ID}) 4192 if err != nil { 4193 t.Fatalf("err: %v", err) 4194 } 4195 4196 if !watchFired(ws) { 4197 t.Fatalf("bad") 4198 } 4199 4200 ws = memdb.NewWatchSet() 4201 summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) 4202 if err != nil { 4203 t.Fatalf("err: %v", err) 4204 } 4205 if summary == nil { 4206 t.Fatalf("nil summary") 4207 } 4208 if summary.JobID != parent.ID { 4209 t.Fatalf("bad summary id: %v", parent.ID) 4210 } 4211 if summary.Children == nil { 4212 t.Fatalf("nil children summary") 4213 } 4214 if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { 4215 t.Fatalf("bad children summary: %v", summary.Children) 4216 } 4217 4218 if watchFired(ws) { 4219 t.Fatalf("bad") 4220 } 4221 } 4222 4223 func TestStateStore_EvalsByJob(t *testing.T) { 4224 t.Parallel() 4225 4226 state := testStateStore(t) 4227 4228 eval1 := mock.Eval() 4229 eval2 := mock.Eval() 4230 eval2.JobID = eval1.JobID 4231 eval3 := mock.Eval() 4232 evals := []*structs.Evaluation{eval1, eval2} 4233 4234 err := state.UpsertEvals(1000, evals) 4235 if err != nil { 4236 t.Fatalf("err: %v", err) 4237 } 4238 err = state.UpsertEvals(1001, []*structs.Evaluation{eval3}) 4239 if err != nil { 4240 t.Fatalf("err: %v", err) 4241 } 4242 4243 
ws := memdb.NewWatchSet() 4244 out, err := state.EvalsByJob(ws, eval1.Namespace, eval1.JobID) 4245 if err != nil { 4246 t.Fatalf("err: %v", err) 4247 } 4248 4249 sort.Sort(EvalIDSort(evals)) 4250 sort.Sort(EvalIDSort(out)) 4251 4252 if !reflect.DeepEqual(evals, out) { 4253 t.Fatalf("bad: %#v %#v", evals, out) 4254 } 4255 4256 if watchFired(ws) { 4257 t.Fatalf("bad") 4258 } 4259 } 4260 4261 func TestStateStore_Evals(t *testing.T) { 4262 t.Parallel() 4263 4264 state := testStateStore(t) 4265 var evals []*structs.Evaluation 4266 4267 for i := 0; i < 10; i++ { 4268 eval := mock.Eval() 4269 evals = append(evals, eval) 4270 4271 err := state.UpsertEvals(1000+uint64(i), []*structs.Evaluation{eval}) 4272 if err != nil { 4273 t.Fatalf("err: %v", err) 4274 } 4275 } 4276 4277 ws := memdb.NewWatchSet() 4278 iter, err := state.Evals(ws) 4279 if err != nil { 4280 t.Fatalf("err: %v", err) 4281 } 4282 4283 var out []*structs.Evaluation 4284 for { 4285 raw := iter.Next() 4286 if raw == nil { 4287 break 4288 } 4289 out = append(out, raw.(*structs.Evaluation)) 4290 } 4291 4292 sort.Sort(EvalIDSort(evals)) 4293 sort.Sort(EvalIDSort(out)) 4294 4295 if !reflect.DeepEqual(evals, out) { 4296 t.Fatalf("bad: %#v %#v", evals, out) 4297 } 4298 4299 if watchFired(ws) { 4300 t.Fatalf("bad") 4301 } 4302 } 4303 4304 func TestStateStore_EvalsByIDPrefix(t *testing.T) { 4305 t.Parallel() 4306 4307 state := testStateStore(t) 4308 var evals []*structs.Evaluation 4309 4310 ids := []string{ 4311 "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", 4312 "aaaaaaab-7bfb-395d-eb95-0685af2176b2", 4313 "aaaaaabb-7bfb-395d-eb95-0685af2176b2", 4314 "aaaaabbb-7bfb-395d-eb95-0685af2176b2", 4315 "aaaabbbb-7bfb-395d-eb95-0685af2176b2", 4316 "aaabbbbb-7bfb-395d-eb95-0685af2176b2", 4317 "aabbbbbb-7bfb-395d-eb95-0685af2176b2", 4318 "abbbbbbb-7bfb-395d-eb95-0685af2176b2", 4319 "bbbbbbbb-7bfb-395d-eb95-0685af2176b2", 4320 } 4321 for i := 0; i < 9; i++ { 4322 eval := mock.Eval() 4323 eval.ID = ids[i] 4324 evals = append(evals, eval) 
4325 } 4326 4327 err := state.UpsertEvals(1000, evals) 4328 if err != nil { 4329 t.Fatalf("err: %v", err) 4330 } 4331 4332 ws := memdb.NewWatchSet() 4333 iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "aaaa") 4334 if err != nil { 4335 t.Fatalf("err: %v", err) 4336 } 4337 4338 gatherEvals := func(iter memdb.ResultIterator) []*structs.Evaluation { 4339 var evals []*structs.Evaluation 4340 for { 4341 raw := iter.Next() 4342 if raw == nil { 4343 break 4344 } 4345 evals = append(evals, raw.(*structs.Evaluation)) 4346 } 4347 return evals 4348 } 4349 4350 out := gatherEvals(iter) 4351 if len(out) != 5 { 4352 t.Fatalf("bad: expected five evaluations, got: %#v", out) 4353 } 4354 4355 sort.Sort(EvalIDSort(evals)) 4356 4357 for index, eval := range out { 4358 if ids[index] != eval.ID { 4359 t.Fatalf("bad: got unexpected id: %s", eval.ID) 4360 } 4361 } 4362 4363 iter, err = state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb") 4364 if err != nil { 4365 t.Fatalf("err: %v", err) 4366 } 4367 4368 out = gatherEvals(iter) 4369 if len(out) != 0 { 4370 t.Fatalf("bad: unexpected zero evaluations, got: %#v", out) 4371 } 4372 4373 if watchFired(ws) { 4374 t.Fatalf("bad") 4375 } 4376 } 4377 4378 func TestStateStore_RestoreEval(t *testing.T) { 4379 t.Parallel() 4380 4381 state := testStateStore(t) 4382 eval := mock.Eval() 4383 4384 restore, err := state.Restore() 4385 if err != nil { 4386 t.Fatalf("err: %v", err) 4387 } 4388 4389 err = restore.EvalRestore(eval) 4390 if err != nil { 4391 t.Fatalf("err: %v", err) 4392 } 4393 restore.Commit() 4394 4395 ws := memdb.NewWatchSet() 4396 out, err := state.EvalByID(ws, eval.ID) 4397 if err != nil { 4398 t.Fatalf("err: %v", err) 4399 } 4400 4401 if !reflect.DeepEqual(out, eval) { 4402 t.Fatalf("Bad: %#v %#v", out, eval) 4403 } 4404 } 4405 4406 func TestStateStore_UpdateAllocsFromClient(t *testing.T) { 4407 t.Parallel() 4408 4409 state := testStateStore(t) 4410 parent := mock.Job() 4411 if err := state.UpsertJob(998, 
parent); err != nil { 4412 t.Fatalf("err: %v", err) 4413 } 4414 4415 child := mock.Job() 4416 child.ParentID = parent.ID 4417 if err := state.UpsertJob(999, child); err != nil { 4418 t.Fatalf("err: %v", err) 4419 } 4420 4421 alloc := mock.Alloc() 4422 alloc.JobID = child.ID 4423 alloc.Job = child 4424 4425 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) 4426 if err != nil { 4427 t.Fatalf("err: %v", err) 4428 } 4429 4430 ws := memdb.NewWatchSet() 4431 summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) 4432 if err != nil { 4433 t.Fatalf("err: %v", err) 4434 } 4435 if summary == nil { 4436 t.Fatalf("nil summary") 4437 } 4438 if summary.JobID != parent.ID { 4439 t.Fatalf("bad summary id: %v", parent.ID) 4440 } 4441 if summary.Children == nil { 4442 t.Fatalf("nil children summary") 4443 } 4444 if summary.Children.Pending != 0 || summary.Children.Running != 1 || summary.Children.Dead != 0 { 4445 t.Fatalf("bad children summary: %v", summary.Children) 4446 } 4447 4448 // Create watchsets so we can test that update fires the watch 4449 ws = memdb.NewWatchSet() 4450 if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { 4451 t.Fatalf("bad: %v", err) 4452 } 4453 4454 // Create the delta updates 4455 ts := map[string]*structs.TaskState{"web": {State: structs.TaskStateRunning}} 4456 update := &structs.Allocation{ 4457 ID: alloc.ID, 4458 ClientStatus: structs.AllocClientStatusComplete, 4459 TaskStates: ts, 4460 JobID: alloc.JobID, 4461 TaskGroup: alloc.TaskGroup, 4462 } 4463 err = state.UpdateAllocsFromClient(1001, []*structs.Allocation{update}) 4464 if err != nil { 4465 t.Fatalf("err: %v", err) 4466 } 4467 4468 if !watchFired(ws) { 4469 t.Fatalf("bad") 4470 } 4471 4472 ws = memdb.NewWatchSet() 4473 summary, err = state.JobSummaryByID(ws, parent.Namespace, parent.ID) 4474 if err != nil { 4475 t.Fatalf("err: %v", err) 4476 } 4477 if summary == nil { 4478 t.Fatalf("nil summary") 4479 } 4480 if summary.JobID != parent.ID { 
4481 t.Fatalf("bad summary id: %v", parent.ID) 4482 } 4483 if summary.Children == nil { 4484 t.Fatalf("nil children summary") 4485 } 4486 if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { 4487 t.Fatalf("bad children summary: %v", summary.Children) 4488 } 4489 4490 if watchFired(ws) { 4491 t.Fatalf("bad") 4492 } 4493 } 4494 4495 func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { 4496 t.Parallel() 4497 4498 state := testStateStore(t) 4499 alloc1 := mock.Alloc() 4500 alloc2 := mock.Alloc() 4501 4502 if err := state.UpsertJob(999, alloc1.Job); err != nil { 4503 t.Fatalf("err: %v", err) 4504 } 4505 if err := state.UpsertJob(999, alloc2.Job); err != nil { 4506 t.Fatalf("err: %v", err) 4507 } 4508 4509 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}) 4510 if err != nil { 4511 t.Fatalf("err: %v", err) 4512 } 4513 4514 // Create watchsets so we can test that update fires the watch 4515 watches := make([]memdb.WatchSet, 8) 4516 for i := 0; i < 8; i++ { 4517 watches[i] = memdb.NewWatchSet() 4518 } 4519 if _, err := state.AllocByID(watches[0], alloc1.ID); err != nil { 4520 t.Fatalf("bad: %v", err) 4521 } 4522 if _, err := state.AllocByID(watches[1], alloc2.ID); err != nil { 4523 t.Fatalf("bad: %v", err) 4524 } 4525 if _, err := state.AllocsByEval(watches[2], alloc1.EvalID); err != nil { 4526 t.Fatalf("bad: %v", err) 4527 } 4528 if _, err := state.AllocsByEval(watches[3], alloc2.EvalID); err != nil { 4529 t.Fatalf("bad: %v", err) 4530 } 4531 if _, err := state.AllocsByJob(watches[4], alloc1.Namespace, alloc1.JobID, false); err != nil { 4532 t.Fatalf("bad: %v", err) 4533 } 4534 if _, err := state.AllocsByJob(watches[5], alloc2.Namespace, alloc2.JobID, false); err != nil { 4535 t.Fatalf("bad: %v", err) 4536 } 4537 if _, err := state.AllocsByNode(watches[6], alloc1.NodeID); err != nil { 4538 t.Fatalf("bad: %v", err) 4539 } 4540 if _, err := state.AllocsByNode(watches[7], alloc2.NodeID); err != 
nil {
		t.Fatalf("bad: %v", err)
	}

	// Create the delta updates
	ts := map[string]*structs.TaskState{"web": {State: structs.TaskStatePending}}
	update := &structs.Allocation{
		ID:           alloc1.ID,
		ClientStatus: structs.AllocClientStatusFailed,
		TaskStates:   ts,
		JobID:        alloc1.JobID,
		TaskGroup:    alloc1.TaskGroup,
	}
	update2 := &structs.Allocation{
		ID:           alloc2.ID,
		ClientStatus: structs.AllocClientStatusRunning,
		TaskStates:   ts,
		JobID:        alloc2.JobID,
		TaskGroup:    alloc2.TaskGroup,
	}

	err = state.UpdateAllocsFromClient(1001, []*structs.Allocation{update, update2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	for i, ws := range watches {
		if !watchFired(ws) {
			t.Fatalf("bad %d", i)
		}
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocByID(ws, alloc1.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	alloc1.CreateIndex = 1000
	alloc1.ModifyIndex = 1001
	alloc1.TaskStates = ts
	alloc1.ClientStatus = structs.AllocClientStatusFailed
	if !reflect.DeepEqual(alloc1, out) {
		t.Fatalf("bad: %#v %#v", alloc1, out)
	}

	out, err = state.AllocByID(ws, alloc2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// BUG FIX: this line previously read `alloc2.ModifyIndex = 1000`,
	// immediately overwritten by the next line; CreateIndex was clearly
	// intended, mirroring the alloc1 expectations above.
	alloc2.CreateIndex = 1000
	alloc2.ModifyIndex = 1001
	alloc2.ClientStatus = structs.AllocClientStatusRunning
	alloc2.TaskStates = ts
	if !reflect.DeepEqual(alloc2, out) {
		t.Fatalf("bad: %#v %#v", alloc2, out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	// Ensure summaries have been updated
	summary, err := state.JobSummaryByID(ws, alloc1.Namespace, alloc1.JobID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	tgSummary := summary.Summary["web"]
	if tgSummary.Failed != 1 {
t.Fatalf("expected failed: %v, actual: %v, summary: %#v", 1, tgSummary.Failed, tgSummary) 4615 } 4616 4617 summary2, err := state.JobSummaryByID(ws, alloc2.Namespace, alloc2.JobID) 4618 if err != nil { 4619 t.Fatalf("err: %v", err) 4620 } 4621 tgSummary2 := summary2.Summary["web"] 4622 if tgSummary2.Running != 1 { 4623 t.Fatalf("expected running: %v, actual: %v", 1, tgSummary2.Running) 4624 } 4625 4626 if watchFired(ws) { 4627 t.Fatalf("bad") 4628 } 4629 } 4630 4631 func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { 4632 t.Parallel() 4633 4634 state := testStateStore(t) 4635 alloc := mock.Alloc() 4636 4637 if err := state.UpsertJob(999, alloc.Job); err != nil { 4638 t.Fatalf("err: %v", err) 4639 } 4640 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) 4641 if err != nil { 4642 t.Fatalf("err: %v", err) 4643 } 4644 4645 // Create the delta updates 4646 ts := map[string]*structs.TaskState{"web": {State: structs.TaskStatePending}} 4647 update := &structs.Allocation{ 4648 ID: alloc.ID, 4649 ClientStatus: structs.AllocClientStatusRunning, 4650 TaskStates: ts, 4651 JobID: alloc.JobID, 4652 TaskGroup: alloc.TaskGroup, 4653 } 4654 update2 := &structs.Allocation{ 4655 ID: alloc.ID, 4656 ClientStatus: structs.AllocClientStatusPending, 4657 TaskStates: ts, 4658 JobID: alloc.JobID, 4659 TaskGroup: alloc.TaskGroup, 4660 } 4661 4662 err = state.UpdateAllocsFromClient(1001, []*structs.Allocation{update, update2}) 4663 if err != nil { 4664 t.Fatalf("err: %v", err) 4665 } 4666 4667 ws := memdb.NewWatchSet() 4668 out, err := state.AllocByID(ws, alloc.ID) 4669 if err != nil { 4670 t.Fatalf("err: %v", err) 4671 } 4672 4673 alloc.CreateIndex = 1000 4674 alloc.ModifyIndex = 1001 4675 alloc.TaskStates = ts 4676 alloc.ClientStatus = structs.AllocClientStatusPending 4677 if !reflect.DeepEqual(alloc, out) { 4678 t.Fatalf("bad: %#v , actual:%#v", alloc, out) 4679 } 4680 4681 summary, err := state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID) 4682 expectedSummary 
:= &structs.JobSummary{ 4683 JobID: alloc.JobID, 4684 Namespace: alloc.Namespace, 4685 Summary: map[string]structs.TaskGroupSummary{ 4686 "web": { 4687 Starting: 1, 4688 }, 4689 }, 4690 Children: new(structs.JobChildrenSummary), 4691 CreateIndex: 999, 4692 ModifyIndex: 1001, 4693 } 4694 if err != nil { 4695 t.Fatalf("err: %v", err) 4696 } 4697 if !reflect.DeepEqual(summary, expectedSummary) { 4698 t.Fatalf("expected: %#v, actual: %#v", expectedSummary, summary) 4699 } 4700 } 4701 4702 func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { 4703 t.Parallel() 4704 require := require.New(t) 4705 4706 state := testStateStore(t) 4707 4708 alloc := mock.Alloc() 4709 now := time.Now() 4710 alloc.CreateTime = now.UnixNano() 4711 pdeadline := 5 * time.Minute 4712 deployment := mock.Deployment() 4713 deployment.TaskGroups[alloc.TaskGroup].ProgressDeadline = pdeadline 4714 alloc.DeploymentID = deployment.ID 4715 4716 require.Nil(state.UpsertJob(999, alloc.Job)) 4717 require.Nil(state.UpsertDeployment(1000, deployment)) 4718 require.Nil(state.UpsertAllocs(1001, []*structs.Allocation{alloc})) 4719 4720 healthy := now.Add(time.Second) 4721 update := &structs.Allocation{ 4722 ID: alloc.ID, 4723 ClientStatus: structs.AllocClientStatusRunning, 4724 JobID: alloc.JobID, 4725 TaskGroup: alloc.TaskGroup, 4726 DeploymentStatus: &structs.AllocDeploymentStatus{ 4727 Healthy: helper.BoolToPtr(true), 4728 Timestamp: healthy, 4729 }, 4730 } 4731 require.Nil(state.UpdateAllocsFromClient(1001, []*structs.Allocation{update})) 4732 4733 // Check that the deployment state was updated because the healthy 4734 // deployment 4735 dout, err := state.DeploymentByID(nil, deployment.ID) 4736 require.Nil(err) 4737 require.NotNil(dout) 4738 require.Len(dout.TaskGroups, 1) 4739 dstate := dout.TaskGroups[alloc.TaskGroup] 4740 require.NotNil(dstate) 4741 require.Equal(1, dstate.PlacedAllocs) 4742 require.True(healthy.Add(pdeadline).Equal(dstate.RequireProgressBy)) 4743 } 4744 4745 // This tests 
that the deployment state is merged correctly 4746 func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { 4747 t.Parallel() 4748 require := require.New(t) 4749 4750 state := testStateStore(t) 4751 alloc := mock.Alloc() 4752 now := time.Now() 4753 alloc.CreateTime = now.UnixNano() 4754 pdeadline := 5 * time.Minute 4755 deployment := mock.Deployment() 4756 deployment.TaskGroups[alloc.TaskGroup].ProgressDeadline = pdeadline 4757 alloc.DeploymentID = deployment.ID 4758 alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ 4759 Canary: true, 4760 } 4761 4762 require.Nil(state.UpsertJob(999, alloc.Job)) 4763 require.Nil(state.UpsertDeployment(1000, deployment)) 4764 require.Nil(state.UpsertAllocs(1001, []*structs.Allocation{alloc})) 4765 4766 update := &structs.Allocation{ 4767 ID: alloc.ID, 4768 ClientStatus: structs.AllocClientStatusRunning, 4769 JobID: alloc.JobID, 4770 TaskGroup: alloc.TaskGroup, 4771 DeploymentStatus: &structs.AllocDeploymentStatus{ 4772 Healthy: helper.BoolToPtr(true), 4773 Canary: false, 4774 }, 4775 } 4776 require.Nil(state.UpdateAllocsFromClient(1001, []*structs.Allocation{update})) 4777 4778 // Check that the merging of the deployment status was correct 4779 out, err := state.AllocByID(nil, alloc.ID) 4780 require.Nil(err) 4781 require.NotNil(out) 4782 require.True(out.DeploymentStatus.Canary) 4783 require.NotNil(out.DeploymentStatus.Healthy) 4784 require.True(*out.DeploymentStatus.Healthy) 4785 } 4786 4787 func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { 4788 t.Parallel() 4789 4790 state := testStateStore(t) 4791 alloc := mock.Alloc() 4792 4793 if err := state.UpsertJob(999, alloc.Job); err != nil { 4794 t.Fatalf("err: %v", err) 4795 } 4796 4797 // Create watchsets so we can test that update fires the watch 4798 watches := make([]memdb.WatchSet, 4) 4799 for i := 0; i < 4; i++ { 4800 watches[i] = memdb.NewWatchSet() 4801 } 4802 if _, err := state.AllocByID(watches[0], alloc.ID); err != nil { 4803 t.Fatalf("bad: 
%v", err) 4804 } 4805 if _, err := state.AllocsByEval(watches[1], alloc.EvalID); err != nil { 4806 t.Fatalf("bad: %v", err) 4807 } 4808 if _, err := state.AllocsByJob(watches[2], alloc.Namespace, alloc.JobID, false); err != nil { 4809 t.Fatalf("bad: %v", err) 4810 } 4811 if _, err := state.AllocsByNode(watches[3], alloc.NodeID); err != nil { 4812 t.Fatalf("bad: %v", err) 4813 } 4814 4815 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) 4816 if err != nil { 4817 t.Fatalf("err: %v", err) 4818 } 4819 4820 for i, ws := range watches { 4821 if !watchFired(ws) { 4822 t.Fatalf("bad %d", i) 4823 } 4824 } 4825 4826 ws := memdb.NewWatchSet() 4827 out, err := state.AllocByID(ws, alloc.ID) 4828 if err != nil { 4829 t.Fatalf("err: %v", err) 4830 } 4831 4832 if !reflect.DeepEqual(alloc, out) { 4833 t.Fatalf("bad: %#v %#v", alloc, out) 4834 } 4835 4836 index, err := state.Index("allocs") 4837 if err != nil { 4838 t.Fatalf("err: %v", err) 4839 } 4840 if index != 1000 { 4841 t.Fatalf("bad: %d", index) 4842 } 4843 4844 summary, err := state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID) 4845 if err != nil { 4846 t.Fatalf("err: %v", err) 4847 } 4848 4849 tgSummary, ok := summary.Summary["web"] 4850 if !ok { 4851 t.Fatalf("no summary for task group web") 4852 } 4853 if tgSummary.Starting != 1 { 4854 t.Fatalf("expected queued: %v, actual: %v", 1, tgSummary.Starting) 4855 } 4856 4857 if watchFired(ws) { 4858 t.Fatalf("bad") 4859 } 4860 } 4861 4862 func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { 4863 t.Parallel() 4864 require := require.New(t) 4865 4866 state := testStateStore(t) 4867 alloc := mock.Alloc() 4868 now := time.Now() 4869 alloc.CreateTime = now.UnixNano() 4870 alloc.ModifyTime = now.UnixNano() 4871 pdeadline := 5 * time.Minute 4872 deployment := mock.Deployment() 4873 deployment.TaskGroups[alloc.TaskGroup].ProgressDeadline = pdeadline 4874 alloc.DeploymentID = deployment.ID 4875 4876 require.Nil(state.UpsertJob(999, alloc.Job)) 4877 
require.Nil(state.UpsertDeployment(1000, deployment)) 4878 4879 // Create a watch set so we can test that update fires the watch 4880 ws := memdb.NewWatchSet() 4881 require.Nil(state.AllocsByDeployment(ws, alloc.DeploymentID)) 4882 4883 err := state.UpsertAllocs(1001, []*structs.Allocation{alloc}) 4884 require.Nil(err) 4885 4886 if !watchFired(ws) { 4887 t.Fatalf("watch not fired") 4888 } 4889 4890 ws = memdb.NewWatchSet() 4891 allocs, err := state.AllocsByDeployment(ws, alloc.DeploymentID) 4892 require.Nil(err) 4893 require.Len(allocs, 1) 4894 require.EqualValues(alloc, allocs[0]) 4895 4896 index, err := state.Index("allocs") 4897 require.Nil(err) 4898 require.EqualValues(1001, index) 4899 if watchFired(ws) { 4900 t.Fatalf("bad") 4901 } 4902 4903 // Check that the deployment state was updated 4904 dout, err := state.DeploymentByID(nil, deployment.ID) 4905 require.Nil(err) 4906 require.NotNil(dout) 4907 require.Len(dout.TaskGroups, 1) 4908 dstate := dout.TaskGroups[alloc.TaskGroup] 4909 require.NotNil(dstate) 4910 require.Equal(1, dstate.PlacedAllocs) 4911 require.True(now.Add(pdeadline).Equal(dstate.RequireProgressBy)) 4912 } 4913 4914 // Testing to ensure we keep issue 4915 // https://github.com/hashicorp/nomad/issues/2583 fixed 4916 func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { 4917 t.Parallel() 4918 4919 state := testStateStore(t) 4920 alloc := mock.Alloc() 4921 alloc.Job = nil 4922 4923 err := state.UpsertAllocs(999, []*structs.Allocation{alloc}) 4924 if err == nil || !strings.Contains(err.Error(), "without a job") { 4925 t.Fatalf("expect err: %v", err) 4926 } 4927 } 4928 4929 func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { 4930 t.Parallel() 4931 4932 state := testStateStore(t) 4933 4934 parent := mock.Job() 4935 if err := state.UpsertJob(998, parent); err != nil { 4936 t.Fatalf("err: %v", err) 4937 } 4938 4939 child := mock.Job() 4940 child.ParentID = parent.ID 4941 4942 if err := state.UpsertJob(999, child); err != nil { 4943 t.Fatalf("err: 
%v", err) 4944 } 4945 4946 alloc := mock.Alloc() 4947 alloc.JobID = child.ID 4948 alloc.Job = child 4949 4950 // Create watchsets so we can test that delete fires the watch 4951 ws := memdb.NewWatchSet() 4952 if _, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID); err != nil { 4953 t.Fatalf("bad: %v", err) 4954 } 4955 4956 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) 4957 if err != nil { 4958 t.Fatalf("err: %v", err) 4959 } 4960 4961 if !watchFired(ws) { 4962 t.Fatalf("bad") 4963 } 4964 4965 ws = memdb.NewWatchSet() 4966 summary, err := state.JobSummaryByID(ws, parent.Namespace, parent.ID) 4967 if err != nil { 4968 t.Fatalf("err: %v", err) 4969 } 4970 if summary == nil { 4971 t.Fatalf("nil summary") 4972 } 4973 if summary.JobID != parent.ID { 4974 t.Fatalf("bad summary id: %v", parent.ID) 4975 } 4976 if summary.Children == nil { 4977 t.Fatalf("nil children summary") 4978 } 4979 if summary.Children.Pending != 0 || summary.Children.Running != 1 || summary.Children.Dead != 0 { 4980 t.Fatalf("bad children summary: %v", summary.Children) 4981 } 4982 4983 if watchFired(ws) { 4984 t.Fatalf("bad") 4985 } 4986 } 4987 4988 func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { 4989 t.Parallel() 4990 4991 state := testStateStore(t) 4992 alloc := mock.Alloc() 4993 4994 if err := state.UpsertJob(999, alloc.Job); err != nil { 4995 t.Fatalf("err: %v", err) 4996 } 4997 4998 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) 4999 if err != nil { 5000 t.Fatalf("err: %v", err) 5001 } 5002 5003 ws := memdb.NewWatchSet() 5004 summary, err := state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID) 5005 if err != nil { 5006 t.Fatalf("err: %v", err) 5007 } 5008 tgSummary := summary.Summary["web"] 5009 if tgSummary.Starting != 1 { 5010 t.Fatalf("expected starting: %v, actual: %v", 1, tgSummary.Starting) 5011 } 5012 5013 alloc2 := mock.Alloc() 5014 alloc2.ID = alloc.ID 5015 alloc2.NodeID = alloc.NodeID + ".new" 5016 state.UpsertJobSummary(1001, 
mock.JobSummary(alloc2.JobID))

	// Create watchsets so we can test that update fires the watch
	watches := make([]memdb.WatchSet, 4)
	for i := 0; i < 4; i++ {
		watches[i] = memdb.NewWatchSet()
	}
	if _, err := state.AllocByID(watches[0], alloc2.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByEval(watches[1], alloc2.EvalID); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByJob(watches[2], alloc2.Namespace, alloc2.JobID, false); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if _, err := state.AllocsByNode(watches[3], alloc2.NodeID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	err = state.UpsertAllocs(1002, []*structs.Allocation{alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	for i, ws := range watches {
		if !watchFired(ws) {
			t.Fatalf("bad %d", i)
		}
	}

	ws = memdb.NewWatchSet()
	out, err := state.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(alloc2, out) {
		t.Fatalf("bad: %#v %#v", alloc2, out)
	}

	if out.CreateIndex != 1000 {
		t.Fatalf("bad: %#v", out)
	}
	if out.ModifyIndex != 1002 {
		t.Fatalf("bad: %#v", out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1002 {
		t.Fatalf("bad: %d", index)
	}

	// Ensure that summary hasn't changed
	summary, err = state.JobSummaryByID(ws, alloc.Namespace, alloc.JobID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	tgSummary = summary.Summary["web"]
	if tgSummary.Starting != 1 {
		t.Fatalf("expected starting: %v, actual: %v", 1, tgSummary.Starting)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// This test ensures that the state store will mark the clients status as lost
// when set rather than preferring the existing status.
func TestStateStore_UpdateAlloc_Lost(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	alloc := mock.Alloc()
	alloc.ClientStatus = "foo"

	if err := state.UpsertJob(999, alloc.Job); err != nil {
		t.Fatalf("err: %v", err)
	}

	err := state.UpsertAllocs(1000, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Re-upsert the same alloc with ClientStatus forced to "lost".
	alloc2 := new(structs.Allocation)
	*alloc2 = *alloc
	alloc2.ClientStatus = structs.AllocClientStatusLost
	if err := state.UpsertAllocs(1001, []*structs.Allocation{alloc2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocByID(ws, alloc2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out.ClientStatus != structs.AllocClientStatusLost {
		t.Fatalf("bad: %#v", out)
	}
}

// This test ensures an allocation can be updated when there is no job
// associated with it.
This will happen when a job is stopped by an user which 5125 // has non-terminal allocations on clients 5126 func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { 5127 t.Parallel() 5128 5129 state := testStateStore(t) 5130 alloc := mock.Alloc() 5131 5132 // Upsert a job 5133 state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID)) 5134 if err := state.UpsertJob(999, alloc.Job); err != nil { 5135 t.Fatalf("err: %v", err) 5136 } 5137 5138 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) 5139 if err != nil { 5140 t.Fatalf("err: %v", err) 5141 } 5142 5143 if err := state.DeleteJob(1001, alloc.Namespace, alloc.JobID); err != nil { 5144 t.Fatalf("err: %v", err) 5145 } 5146 5147 // Update the desired state of the allocation to stop 5148 allocCopy := alloc.Copy() 5149 allocCopy.DesiredStatus = structs.AllocDesiredStatusStop 5150 if err := state.UpsertAllocs(1002, []*structs.Allocation{allocCopy}); err != nil { 5151 t.Fatalf("err: %v", err) 5152 } 5153 5154 // Update the client state of the allocation to complete 5155 allocCopy1 := allocCopy.Copy() 5156 allocCopy1.ClientStatus = structs.AllocClientStatusComplete 5157 if err := state.UpdateAllocsFromClient(1003, []*structs.Allocation{allocCopy1}); err != nil { 5158 t.Fatalf("err: %v", err) 5159 } 5160 5161 ws := memdb.NewWatchSet() 5162 out, _ := state.AllocByID(ws, alloc.ID) 5163 // Update the modify index of the alloc before comparing 5164 allocCopy1.ModifyIndex = 1003 5165 if !reflect.DeepEqual(out, allocCopy1) { 5166 t.Fatalf("expected: %#v \n actual: %#v", allocCopy1, out) 5167 } 5168 } 5169 5170 func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { 5171 t.Parallel() 5172 require := require.New(t) 5173 5174 state := testStateStore(t) 5175 alloc := mock.Alloc() 5176 5177 require.Nil(state.UpsertJob(999, alloc.Job)) 5178 require.Nil(state.UpsertAllocs(1000, []*structs.Allocation{alloc})) 5179 5180 t1 := &structs.DesiredTransition{ 5181 Migrate: helper.BoolToPtr(true), 5182 } 5183 t2 := 
&structs.DesiredTransition{ 5184 Migrate: helper.BoolToPtr(false), 5185 } 5186 eval := &structs.Evaluation{ 5187 ID: uuid.Generate(), 5188 Namespace: alloc.Namespace, 5189 Priority: alloc.Job.Priority, 5190 Type: alloc.Job.Type, 5191 TriggeredBy: structs.EvalTriggerNodeDrain, 5192 JobID: alloc.Job.ID, 5193 JobModifyIndex: alloc.Job.ModifyIndex, 5194 Status: structs.EvalStatusPending, 5195 } 5196 evals := []*structs.Evaluation{eval} 5197 5198 m := map[string]*structs.DesiredTransition{alloc.ID: t1} 5199 require.Nil(state.UpdateAllocsDesiredTransitions(1001, m, evals)) 5200 5201 ws := memdb.NewWatchSet() 5202 out, err := state.AllocByID(ws, alloc.ID) 5203 require.Nil(err) 5204 require.NotNil(out.DesiredTransition.Migrate) 5205 require.True(*out.DesiredTransition.Migrate) 5206 require.EqualValues(1000, out.CreateIndex) 5207 require.EqualValues(1001, out.ModifyIndex) 5208 5209 index, err := state.Index("allocs") 5210 require.Nil(err) 5211 require.EqualValues(1001, index) 5212 5213 // Check the eval is created 5214 eout, err := state.EvalByID(nil, eval.ID) 5215 require.Nil(err) 5216 require.NotNil(eout) 5217 5218 m = map[string]*structs.DesiredTransition{alloc.ID: t2} 5219 require.Nil(state.UpdateAllocsDesiredTransitions(1002, m, evals)) 5220 5221 ws = memdb.NewWatchSet() 5222 out, err = state.AllocByID(ws, alloc.ID) 5223 require.Nil(err) 5224 require.NotNil(out.DesiredTransition.Migrate) 5225 require.False(*out.DesiredTransition.Migrate) 5226 require.EqualValues(1000, out.CreateIndex) 5227 require.EqualValues(1002, out.ModifyIndex) 5228 5229 index, err = state.Index("allocs") 5230 require.Nil(err) 5231 require.EqualValues(1002, index) 5232 5233 // Try with a bogus alloc id 5234 m = map[string]*structs.DesiredTransition{uuid.Generate(): t2} 5235 require.Nil(state.UpdateAllocsDesiredTransitions(1003, m, evals)) 5236 } 5237 5238 func TestStateStore_JobSummary(t *testing.T) { 5239 t.Parallel() 5240 5241 state := testStateStore(t) 5242 5243 // Add a job 5244 job := 
mock.Job()
	state.UpsertJob(900, job)

	// Get the job back
	ws := memdb.NewWatchSet()
	outJob, _ := state.JobByID(ws, job.Namespace, job.ID)
	if outJob.CreateIndex != 900 {
		t.Fatalf("bad create index: %v", outJob.CreateIndex)
	}
	summary, _ := state.JobSummaryByID(ws, job.Namespace, job.ID)
	if summary.CreateIndex != 900 {
		t.Fatalf("bad create index: %v", summary.CreateIndex)
	}

	// Upsert an allocation
	alloc := mock.Alloc()
	alloc.JobID = job.ID
	alloc.Job = job
	state.UpsertAllocs(910, []*structs.Allocation{alloc})

	// Update the alloc from client
	// NOTE(review): alloc1 is built here but `alloc` is what is sent at
	// index 920 — preserved as-is; confirm against the original intent.
	alloc1 := alloc.Copy()
	alloc1.ClientStatus = structs.AllocClientStatusPending
	alloc1.DesiredStatus = ""
	state.UpdateAllocsFromClient(920, []*structs.Allocation{alloc})

	alloc3 := alloc.Copy()
	alloc3.ClientStatus = structs.AllocClientStatusRunning
	alloc3.DesiredStatus = ""
	state.UpdateAllocsFromClient(930, []*structs.Allocation{alloc3})

	// Upsert the alloc
	alloc4 := alloc.Copy()
	alloc4.ClientStatus = structs.AllocClientStatusPending
	alloc4.DesiredStatus = structs.AllocDesiredStatusRun
	state.UpsertAllocs(950, []*structs.Allocation{alloc4})

	// Again upsert the alloc
	alloc5 := alloc.Copy()
	alloc5.ClientStatus = structs.AllocClientStatusPending
	alloc5.DesiredStatus = structs.AllocDesiredStatusRun
	state.UpsertAllocs(970, []*structs.Allocation{alloc5})

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	expectedSummary := structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Running: 1,
			},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: 900,
		ModifyIndex: 930,
	}

	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	if !reflect.DeepEqual(&expectedSummary, summary) {
		t.Fatalf("expected: %#v, actual: %v", expectedSummary, summary)
	}

	// De-register the job.
	state.DeleteJob(980, job.Namespace, job.ID)

	// Shouldn't have any effect on the summary
	alloc6 := alloc.Copy()
	alloc6.ClientStatus = structs.AllocClientStatusRunning
	alloc6.DesiredStatus = ""
	state.UpdateAllocsFromClient(990, []*structs.Allocation{alloc6})

	// We shouldn't have any summary at this point
	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	if summary != nil {
		t.Fatalf("expected nil, actual: %#v", summary)
	}

	// Re-register the same job
	job1 := mock.Job()
	job1.ID = job.ID
	state.UpsertJob(1000, job1)
	outJob2, _ := state.JobByID(ws, job1.Namespace, job1.ID)
	if outJob2.CreateIndex != 1000 {
		t.Fatalf("bad create index: %v", outJob2.CreateIndex)
	}
	summary, _ = state.JobSummaryByID(ws, job1.Namespace, job1.ID)
	if summary.CreateIndex != 1000 {
		t.Fatalf("bad create index: %v", summary.CreateIndex)
	}

	// Upsert an allocation tied to the OLD job; the fresh summary must not
	// be affected by it.
	alloc7 := alloc.Copy()
	alloc7.JobID = outJob.ID
	alloc7.Job = outJob
	alloc7.ClientStatus = structs.AllocClientStatusComplete
	alloc7.DesiredStatus = structs.AllocDesiredStatusRun
	state.UpdateAllocsFromClient(1020, []*structs.Allocation{alloc7})

	expectedSummary = structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: 1000,
		ModifyIndex: 1000,
	}

	summary, _ = state.JobSummaryByID(ws, job1.Namespace, job1.ID)
	if !reflect.DeepEqual(&expectedSummary, summary) {
		t.Fatalf("expected: %#v, actual: %#v", expectedSummary, summary)
	}
}

// Verifies ReconcileJobSummaries rebuilds a deleted summary from the
// allocations currently in the store.
func TestStateStore_ReconcileJobSummary(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	// Create an
// alloc
	alloc := mock.Alloc()

	// Add another task group to the job
	tg2 := alloc.Job.TaskGroups[0].Copy()
	tg2.Name = "db"
	alloc.Job.TaskGroups = append(alloc.Job.TaskGroups, tg2)
	state.UpsertJob(100, alloc.Job)

	// Create one more alloc for the db task group
	alloc2 := mock.Alloc()
	alloc2.TaskGroup = "db"
	alloc2.JobID = alloc.JobID
	alloc2.Job = alloc.Job

	// Upserts the alloc
	state.UpsertAllocs(110, []*structs.Allocation{alloc, alloc2})

	// Change the state of the first alloc to running
	alloc3 := alloc.Copy()
	alloc3.ClientStatus = structs.AllocClientStatusRunning
	state.UpdateAllocsFromClient(120, []*structs.Allocation{alloc3})

	// Add some more allocs to the second tg, one per client status
	alloc4 := mock.Alloc()
	alloc4.JobID = alloc.JobID
	alloc4.Job = alloc.Job
	alloc4.TaskGroup = "db"
	alloc5 := alloc4.Copy()
	alloc5.ClientStatus = structs.AllocClientStatusRunning

	alloc6 := mock.Alloc()
	alloc6.JobID = alloc.JobID
	alloc6.Job = alloc.Job
	alloc6.TaskGroup = "db"
	alloc7 := alloc6.Copy()
	alloc7.ClientStatus = structs.AllocClientStatusComplete

	alloc8 := mock.Alloc()
	alloc8.JobID = alloc.JobID
	alloc8.Job = alloc.Job
	alloc8.TaskGroup = "db"
	alloc9 := alloc8.Copy()
	alloc9.ClientStatus = structs.AllocClientStatusFailed

	alloc10 := mock.Alloc()
	alloc10.JobID = alloc.JobID
	alloc10.Job = alloc.Job
	alloc10.TaskGroup = "db"
	alloc11 := alloc10.Copy()
	alloc11.ClientStatus = structs.AllocClientStatusLost

	state.UpsertAllocs(130, []*structs.Allocation{alloc4, alloc6, alloc8, alloc10})

	state.UpdateAllocsFromClient(150, []*structs.Allocation{alloc5, alloc7, alloc9, alloc11})

	// DeleteJobSummary is a helper method and doesn't modify the indexes table
	state.DeleteJobSummary(130, alloc.Namespace, alloc.Job.ID)

	state.ReconcileJobSummaries(120)

	ws := memdb.NewWatchSet()
	summary, _ := state.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID)
	expectedSummary := structs.JobSummary{
		JobID:     alloc.Job.ID,
		Namespace: alloc.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Running: 1,
			},
			"db": {
				Starting: 1,
				Running:  1,
				Failed:   1,
				Complete: 1,
				Lost:     1,
			},
		},
		CreateIndex: 100,
		ModifyIndex: 120,
	}
	if !reflect.DeepEqual(&expectedSummary, summary) {
		t.Fatalf("expected: %v, actual: %v", expectedSummary, summary)
	}
}

// Verifies ReconcileJobSummaries also repairs the children summary of a
// parameterized parent job and rebuilds a deleted child job summary.
func TestStateStore_ReconcileParentJobSummary(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	state := testStateStore(t)

	// Add a node
	node := mock.Node()
	state.UpsertNode(80, node)

	// Make a parameterized job
	job1 := mock.BatchJob()
	job1.ID = "test"
	job1.ParameterizedJob = &structs.ParameterizedJobConfig{
		Payload: "random",
	}
	job1.TaskGroups[0].Count = 1
	state.UpsertJob(100, job1)

	// Make a child job
	childJob := job1.Copy()
	childJob.ID = job1.ID + "dispatch-23423423"
	childJob.ParentID = job1.ID
	childJob.Dispatched = true
	childJob.Status = structs.JobStatusRunning

	// Make some allocs for child job
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	alloc.Job = childJob
	alloc.JobID = childJob.ID
	alloc.ClientStatus = structs.AllocClientStatusRunning

	alloc2 := mock.Alloc()
	alloc2.NodeID = node.ID
	alloc2.Job = childJob
	alloc2.JobID = childJob.ID
	alloc2.ClientStatus = structs.AllocClientStatusFailed

	require.Nil(state.UpsertJob(110, childJob))
	require.Nil(state.UpsertAllocs(111, []*structs.Allocation{alloc, alloc2}))

	// Make the summary incorrect in the state store
	summary, err := state.JobSummaryByID(nil, job1.Namespace,
job1.ID)
	require.Nil(err)

	summary.Children = nil
	summary.Summary = make(map[string]structs.TaskGroupSummary)
	summary.Summary["web"] = structs.TaskGroupSummary{
		Queued: 1,
	}

	// Delete the child job summary
	state.DeleteJobSummary(125, childJob.Namespace, childJob.ID)

	state.ReconcileJobSummaries(120)

	ws := memdb.NewWatchSet()

	// Verify parent summary is corrected
	summary, _ = state.JobSummaryByID(ws, alloc.Namespace, job1.ID)
	expectedSummary := structs.JobSummary{
		JobID:     job1.ID,
		Namespace: job1.Namespace,
		Summary:   make(map[string]structs.TaskGroupSummary),
		Children: &structs.JobChildrenSummary{
			Running: 1,
		},
		CreateIndex: 100,
		ModifyIndex: 120,
	}
	require.Equal(&expectedSummary, summary)

	// Verify child job summary is also correct
	childSummary, _ := state.JobSummaryByID(ws, childJob.Namespace, childJob.ID)
	expectedChildSummary := structs.JobSummary{
		JobID:     childJob.ID,
		Namespace: childJob.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Running: 1,
				Failed:  1,
			},
		},
		CreateIndex: 110,
		ModifyIndex: 120,
	}
	require.Equal(&expectedChildSummary, childSummary)
}

// Verifies a client alloc update succeeds even after the alloc's job has
// been deleted, and that a freshly re-registered job's summary does not
// count updates belonging to the old job incarnation.
func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	alloc := mock.Alloc()
	state.UpsertJob(100, alloc.Job)
	state.UpsertAllocs(200, []*structs.Allocation{alloc})

	// Delete the job
	state.DeleteJob(300, alloc.Namespace, alloc.Job.ID)

	// Update the alloc
	alloc1 := alloc.Copy()
	alloc1.ClientStatus = structs.AllocClientStatusRunning

	// Updating allocation should not throw any error
	if err := state.UpdateAllocsFromClient(400, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Re-Register the job
	state.UpsertJob(500, alloc.Job)

	// Update the alloc again
	// NOTE(review): alloc2 is constructed but alloc1 is re-sent at the same
	// index (400) — preserved as-is; sending alloc2 here would change what
	// the summary assertion below observes. Confirm the original intent.
	alloc2 := alloc.Copy()
	alloc2.ClientStatus = structs.AllocClientStatusComplete
	if err := state.UpdateAllocsFromClient(400, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Job Summary of the newly registered job shouldn't account for the
	// allocation update for the older job
	expectedSummary := structs.JobSummary{
		JobID:     alloc1.JobID,
		Namespace: alloc1.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: 500,
		ModifyIndex: 500,
	}

	ws := memdb.NewWatchSet()
	summary, _ := state.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID)
	if !reflect.DeepEqual(&expectedSummary, summary) {
		t.Fatalf("expected: %v, actual: %v", expectedSummary, summary)
	}
}

// Verifies that upserting an alloc with DesiredStatus "evict" replaces the
// stored alloc and bumps the allocs table index.
func TestStateStore_EvictAlloc_Alloc(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	alloc := mock.Alloc()

	state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
	err := state.UpsertAllocs(1000, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	alloc2 := new(structs.Allocation)
	*alloc2 = *alloc
	alloc2.DesiredStatus = structs.AllocDesiredStatusEvict
	err = state.UpsertAllocs(1001, []*structs.Allocation{alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out.DesiredStatus != structs.AllocDesiredStatusEvict {
		t.Fatalf("bad: %#v %#v", alloc, out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}
}

func
TestStateStore_AllocsByNode(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	var allocs []*structs.Allocation

	// Place ten allocations on the same node.
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.NodeID = "foo"
		allocs = append(allocs, alloc)
	}

	for idx, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+idx), mock.JobSummary(alloc.JobID))
	}

	err := state.UpsertAllocs(1000, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocsByNode(ws, "foo")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	sort.Sort(AllocIDSort(allocs))
	sort.Sort(AllocIDSort(out))

	if !reflect.DeepEqual(allocs, out) {
		t.Fatalf("bad: %#v %#v", allocs, out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// Verifies AllocsByNodeTerminal filters a node's allocations by terminal
// versus non-terminal desired status.
func TestStateStore_AllocsByNodeTerminal(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	var allocs, term, nonterm []*structs.Allocation

	// Alternate terminal (stop) and non-terminal allocs on one node.
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.NodeID = "foo"
		if i%2 == 0 {
			alloc.DesiredStatus = structs.AllocDesiredStatusStop
			term = append(term, alloc)
		} else {
			nonterm = append(nonterm, alloc)
		}
		allocs = append(allocs, alloc)
	}

	for idx, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+idx), mock.JobSummary(alloc.JobID))
	}

	err := state.UpsertAllocs(1000, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify the terminal allocs
	ws := memdb.NewWatchSet()
	out, err := state.AllocsByNodeTerminal(ws, "foo", true)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	sort.Sort(AllocIDSort(term))
	sort.Sort(AllocIDSort(out))

	if !reflect.DeepEqual(term, out) {
		t.Fatalf("bad: %#v %#v", term, out)
	}

	// Verify the non-terminal allocs
	out, err = state.AllocsByNodeTerminal(ws, "foo", false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	sort.Sort(AllocIDSort(nonterm))
	sort.Sort(AllocIDSort(out))

	if !reflect.DeepEqual(nonterm, out) {
		t.Fatalf("bad: %#v %#v", nonterm, out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// Verifies AllocsByJob returns every allocation registered under a job ID.
func TestStateStore_AllocsByJob(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	var allocs []*structs.Allocation

	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.JobID = "foo"
		allocs = append(allocs, alloc)
	}

	for i, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID))
	}

	err := state.UpsertAllocs(1000, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	out, err := state.AllocsByJob(ws, mock.Alloc().Namespace, "foo", false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	sort.Sort(AllocIDSort(allocs))
	sort.Sort(AllocIDSort(out))

	if !reflect.DeepEqual(allocs, out) {
		t.Fatalf("bad: %#v %#v", allocs, out)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// Verifies the `anyCreateIndex` flag of AllocsByJob controls whether allocs
// from a previous (deleted) incarnation of the job are included.
func TestStateStore_AllocsForRegisteredJob(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	var allocs []*structs.Allocation
	var allocs1 []*structs.Allocation

	job := mock.Job()
	job.ID = "foo"
	state.UpsertJob(100, job)
	for i := 0; i < 3; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		allocs = append(allocs, alloc)
	}
	if err := state.UpsertAllocs(200, allocs); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := state.DeleteJob(250, job.Namespace, job.ID); err != nil {
		t.Fatalf("err: %v", err)
	}

	job1 := mock.Job()
	job1.ID = "foo"
	job1.CreateIndex = 50
state.UpsertJob(300, job1) 5798 for i := 0; i < 4; i++ { 5799 alloc := mock.Alloc() 5800 alloc.Job = job1 5801 alloc.JobID = job1.ID 5802 allocs1 = append(allocs1, alloc) 5803 } 5804 5805 if err := state.UpsertAllocs(1000, allocs1); err != nil { 5806 t.Fatalf("err: %v", err) 5807 } 5808 5809 ws := memdb.NewWatchSet() 5810 out, err := state.AllocsByJob(ws, job1.Namespace, job1.ID, true) 5811 if err != nil { 5812 t.Fatalf("err: %v", err) 5813 } 5814 5815 expected := len(allocs) + len(allocs1) 5816 if len(out) != expected { 5817 t.Fatalf("expected: %v, actual: %v", expected, len(out)) 5818 } 5819 5820 out1, err := state.AllocsByJob(ws, job1.Namespace, job1.ID, false) 5821 if err != nil { 5822 t.Fatalf("bad: %v", err) 5823 } 5824 5825 expected = len(allocs1) 5826 if len(out1) != expected { 5827 t.Fatalf("expected: %v, actual: %v", expected, len(out1)) 5828 } 5829 5830 if watchFired(ws) { 5831 t.Fatalf("bad") 5832 } 5833 } 5834 5835 func TestStateStore_AllocsByIDPrefix(t *testing.T) { 5836 t.Parallel() 5837 5838 state := testStateStore(t) 5839 var allocs []*structs.Allocation 5840 5841 ids := []string{ 5842 "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", 5843 "aaaaaaab-7bfb-395d-eb95-0685af2176b2", 5844 "aaaaaabb-7bfb-395d-eb95-0685af2176b2", 5845 "aaaaabbb-7bfb-395d-eb95-0685af2176b2", 5846 "aaaabbbb-7bfb-395d-eb95-0685af2176b2", 5847 "aaabbbbb-7bfb-395d-eb95-0685af2176b2", 5848 "aabbbbbb-7bfb-395d-eb95-0685af2176b2", 5849 "abbbbbbb-7bfb-395d-eb95-0685af2176b2", 5850 "bbbbbbbb-7bfb-395d-eb95-0685af2176b2", 5851 } 5852 for i := 0; i < 9; i++ { 5853 alloc := mock.Alloc() 5854 alloc.ID = ids[i] 5855 allocs = append(allocs, alloc) 5856 } 5857 5858 for i, alloc := range allocs { 5859 state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID)) 5860 } 5861 5862 err := state.UpsertAllocs(1000, allocs) 5863 if err != nil { 5864 t.Fatalf("err: %v", err) 5865 } 5866 5867 ws := memdb.NewWatchSet() 5868 iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "aaaa") 5869 
if err != nil { 5870 t.Fatalf("err: %v", err) 5871 } 5872 5873 gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { 5874 var allocs []*structs.Allocation 5875 for { 5876 raw := iter.Next() 5877 if raw == nil { 5878 break 5879 } 5880 allocs = append(allocs, raw.(*structs.Allocation)) 5881 } 5882 return allocs 5883 } 5884 5885 out := gatherAllocs(iter) 5886 if len(out) != 5 { 5887 t.Fatalf("bad: expected five allocations, got: %#v", out) 5888 } 5889 5890 sort.Sort(AllocIDSort(allocs)) 5891 5892 for index, alloc := range out { 5893 if ids[index] != alloc.ID { 5894 t.Fatalf("bad: got unexpected id: %s", alloc.ID) 5895 } 5896 } 5897 5898 iter, err = state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb") 5899 if err != nil { 5900 t.Fatalf("err: %v", err) 5901 } 5902 5903 out = gatherAllocs(iter) 5904 if len(out) != 0 { 5905 t.Fatalf("bad: unexpected zero allocations, got: %#v", out) 5906 } 5907 5908 if watchFired(ws) { 5909 t.Fatalf("bad") 5910 } 5911 } 5912 5913 func TestStateStore_Allocs(t *testing.T) { 5914 t.Parallel() 5915 5916 state := testStateStore(t) 5917 var allocs []*structs.Allocation 5918 5919 for i := 0; i < 10; i++ { 5920 alloc := mock.Alloc() 5921 allocs = append(allocs, alloc) 5922 } 5923 for i, alloc := range allocs { 5924 state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID)) 5925 } 5926 5927 err := state.UpsertAllocs(1000, allocs) 5928 if err != nil { 5929 t.Fatalf("err: %v", err) 5930 } 5931 5932 ws := memdb.NewWatchSet() 5933 iter, err := state.Allocs(ws) 5934 if err != nil { 5935 t.Fatalf("err: %v", err) 5936 } 5937 5938 var out []*structs.Allocation 5939 for { 5940 raw := iter.Next() 5941 if raw == nil { 5942 break 5943 } 5944 out = append(out, raw.(*structs.Allocation)) 5945 } 5946 5947 sort.Sort(AllocIDSort(allocs)) 5948 sort.Sort(AllocIDSort(out)) 5949 5950 if !reflect.DeepEqual(allocs, out) { 5951 t.Fatalf("bad: %#v %#v", allocs, out) 5952 } 5953 5954 if watchFired(ws) { 5955 t.Fatalf("bad") 5956 } 5957 
}

// TestStateStore_Allocs_PrevAlloc verifies that upserting allocs with
// PreviousAllocation set back-fills NextAllocation on the referenced allocs
// and bumps the previous alloc's ModifyIndex.
func TestStateStore_Allocs_PrevAlloc(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	var allocs []*structs.Allocation

	require := require.New(t)
	for i := 0; i < 5; i++ {
		alloc := mock.Alloc()
		allocs = append(allocs, alloc)
	}
	for i, alloc := range allocs {
		state.UpsertJobSummary(uint64(900+i), mock.JobSummary(alloc.JobID))
	}
	// Set some previous alloc ids
	allocs[1].PreviousAllocation = allocs[0].ID
	allocs[2].PreviousAllocation = allocs[1].ID

	err := state.UpsertAllocs(1000, allocs)
	require.Nil(err)

	ws := memdb.NewWatchSet()
	iter, err := state.Allocs(ws)
	require.Nil(err)

	var out []*structs.Allocation
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*structs.Allocation))
	}

	// Set expected NextAllocation fields
	allocs[0].NextAllocation = allocs[1].ID
	allocs[1].NextAllocation = allocs[2].ID

	sort.Sort(AllocIDSort(allocs))
	sort.Sort(AllocIDSort(out))

	require.Equal(allocs, out)
	require.False(watchFired(ws))

	// Insert another alloc, verify index of previous alloc also got updated
	alloc := mock.Alloc()
	alloc.PreviousAllocation = allocs[0].ID
	err = state.UpsertAllocs(1001, []*structs.Allocation{alloc})
	require.Nil(err)
	alloc0, err := state.AllocByID(nil, allocs[0].ID)
	require.Nil(err)
	require.Equal(alloc0.ModifyIndex, uint64(1001))
}

// TestStateStore_RestoreAlloc verifies snapshot restore round-trips an alloc
// without firing watches.
func TestStateStore_RestoreAlloc(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	alloc := mock.Alloc()

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.AllocRestore(alloc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	restore.Commit()

	ws := memdb.NewWatchSet()
	out, err := state.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, alloc) {
		t.Fatalf("Bad: %#v %#v", out, alloc)
	}

	if watchFired(ws) {
		t.Fatalf("bad")
	}
}

// TestStateStore_SetJobStatus_ForceStatus checks that an explicitly forced
// status overrides whatever getJobStatus would compute.
func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	txn := state.db.Txn(true)

	// Create and insert a mock job.
	job := mock.Job()
	job.Status = ""
	job.ModifyIndex = 0
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	exp := "foobar"
	index := uint64(1000)
	if err := state.setJobStatus(index, txn, job, false, exp); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	if updated.Status != exp {
		t.Fatalf("setJobStatus() set %v; expected %v", updated.Status, exp)
	}

	if updated.ModifyIndex != index {
		t.Fatalf("setJobStatus() set %d; expected %d", updated.ModifyIndex, index)
	}
}

// TestStateStore_SetJobStatus_NoOp checks that setJobStatus does not touch a
// job whose status is already correct.
func TestStateStore_SetJobStatus_NoOp(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	txn := state.db.Txn(true)

	// Create and insert a mock job that should be pending.
	job := mock.Job()
	job.Status = structs.JobStatusPending
	job.ModifyIndex = 10
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	index := uint64(1000)
	if err := state.setJobStatus(index, txn, job, false, ""); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	if updated.ModifyIndex == index {
		t.Fatalf("setJobStatus() should have been a no-op")
	}
}

// TestStateStore_SetJobStatus checks that an incorrect stored status gets
// recomputed and persisted.
func TestStateStore_SetJobStatus(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	txn := state.db.Txn(true)

	// Create and insert a mock job that should be pending but has an incorrect
	// status.
	job := mock.Job()
	job.Status = "foobar"
	job.ModifyIndex = 10
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	index := uint64(1000)
	if err := state.setJobStatus(index, txn, job, false, ""); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	if updated.Status != structs.JobStatusPending {
		t.Fatalf("setJobStatus() set %v; expected %v", updated.Status, structs.JobStatusPending)
	}

	if updated.ModifyIndex != index {
		t.Fatalf("setJobStatus() set %d; expected %d", updated.ModifyIndex, index)
	}
}

// A job with no evals or allocs is pending.
func TestStateStore_GetJobStatus_NoEvalsOrAllocs(t *testing.T) {
	t.Parallel()

	job := mock.Job()
	state := testStateStore(t)
	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusPending {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusPending)
	}
}

// A periodic job with no evals or allocs is considered running.
func TestStateStore_GetJobStatus_NoEvalsOrAllocs_Periodic(t *testing.T) {
	t.Parallel()

	job := mock.PeriodicJob()
	state := testStateStore(t)
	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusRunning {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusRunning)
	}
}

// With evalDelete set, a job with no evals or allocs is dead.
func TestStateStore_GetJobStatus_NoEvalsOrAllocs_EvalDelete(t *testing.T) {
	t.Parallel()

	job := mock.Job()
	state := testStateStore(t)
	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}

// A job whose only alloc is stopped and whose only eval is complete is dead.
func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()

	// Create a mock alloc that is dead.
	alloc := mock.Alloc()
	alloc.JobID = job.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
	if err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a mock eval that is complete
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Status = structs.EvalStatusComplete
	if err := state.UpsertEvals(1001, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}

// A single running alloc makes the job running even with evalDelete set.
func TestStateStore_GetJobStatus_RunningAlloc(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()

	// Create a mock alloc that is running.
	alloc := mock.Alloc()
	alloc.JobID = job.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusRun
	state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
	if err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusRunning {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusRunning)
	}
}

// A periodic job is running until stopped, then dead.
func TestStateStore_GetJobStatus_PeriodicJob(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.PeriodicJob()

	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusRunning {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusRunning)
	}

	// Mark it as stopped
	job.Stop = true
	status, err = state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}

// A parameterized job is running until stopped, then dead.
func TestStateStore_GetJobStatus_ParameterizedJob(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()
	job.ParameterizedJob = &structs.ParameterizedJobConfig{}

	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusRunning {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusRunning)
	}

	// Mark it as stopped
	job.Stop = true
	status, err = state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}

// A pending eval keeps the job pending even with evalDelete set.
func TestStateStore_SetJobStatus_PendingEval(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.Job()

	// Create a mock eval that is pending.
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Status = structs.EvalStatusPending
	if err := state.UpsertEvals(1000, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusPending {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusPending)
	}
}

// TestStateStore_SetJobStatus_SystemJob asserts that system jobs are still
// considered running until explicitly stopped.
func TestStateStore_SetJobStatus_SystemJob(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	job := mock.SystemJob()

	// Create a mock eval that is pending.
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Type = job.Type
	eval.Status = structs.EvalStatusComplete
	if err := state.UpsertEvals(1000, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if expected := structs.JobStatusRunning; status != expected {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, expected)
	}

	// Stop the job
	job.Stop = true
	status, err = state.getJobStatus(txn, job, true)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if expected := structs.JobStatusDead; status != expected {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, expected)
	}
}

// TestStateJobSummary_UpdateJobCount verifies the job summary's per-group
// counts track alloc upserts and client status updates, and that each update
// fires the corresponding watch.
func TestStateJobSummary_UpdateJobCount(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	alloc := mock.Alloc()
	job := alloc.Job
	job.TaskGroups[0].Count = 3

	// Create watchsets so we can test that upsert fires the watch
	ws := memdb.NewWatchSet()
	if _, err := state.JobSummaryByID(ws, job.Namespace, job.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	if err := state.UpsertJob(1000, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := state.UpsertAllocs(1001, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	ws = memdb.NewWatchSet()
	summary, _ := state.JobSummaryByID(ws, job.Namespace, job.ID)
	expectedSummary := structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Starting: 1,
			},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: 1000,
		ModifyIndex: 1001,
	}
	if !reflect.DeepEqual(summary, &expectedSummary) {
		t.Fatalf("expected: %v, actual: %v", expectedSummary, summary)
	}

	// Create watchsets so we can test that upsert fires the watch
	ws2 := memdb.NewWatchSet()
	if _, err := state.JobSummaryByID(ws2, job.Namespace, job.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	alloc2 := mock.Alloc()
	alloc2.Job = job
	alloc2.JobID = job.ID

	alloc3 := mock.Alloc()
	alloc3.Job = job
	alloc3.JobID = job.ID

	if err := state.UpsertAllocs(1002, []*structs.Allocation{alloc2, alloc3}); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws2) {
		t.Fatalf("bad")
	}

	outA, _ := state.AllocByID(ws, alloc3.ID)

	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	expectedSummary = structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Starting: 3,
			},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: job.CreateIndex,
		ModifyIndex: outA.ModifyIndex,
	}
	if !reflect.DeepEqual(summary, &expectedSummary) {
		t.Fatalf("expected summary: %v, actual: %v", expectedSummary, summary)
	}

	// Create watchsets so we can test that upsert fires the watch
	ws3 := memdb.NewWatchSet()
	if _, err := state.JobSummaryByID(ws3, job.Namespace, job.ID); err != nil {
		t.Fatalf("bad: %v", err)
	}

	alloc4 := mock.Alloc()
	alloc4.ID = alloc2.ID
	alloc4.Job = alloc2.Job
	alloc4.JobID = alloc2.JobID
	alloc4.ClientStatus = structs.AllocClientStatusComplete

	alloc5 := mock.Alloc()
	alloc5.ID = alloc3.ID
	alloc5.Job = alloc3.Job
	alloc5.JobID = alloc3.JobID
	alloc5.ClientStatus = structs.AllocClientStatusComplete

	if err := state.UpdateAllocsFromClient(1004, []*structs.Allocation{alloc4, alloc5}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Fixed: this previously re-checked ws2, which had already fired at index
	// 1002, making the assertion vacuous. ws3 is the watchset created for this
	// update.
	if !watchFired(ws3) {
		t.Fatalf("bad")
	}

	outA, _ = state.AllocByID(ws, alloc5.ID)
	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	expectedSummary = structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {
				Complete: 2,
				Starting: 1,
			},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: job.CreateIndex,
		ModifyIndex: outA.ModifyIndex,
	}
	if !reflect.DeepEqual(summary, &expectedSummary) {
		t.Fatalf("expected: %v, actual: %v", expectedSummary, summary)
	}
}

// TestJobSummary_UpdateClientStatus verifies the summary transitions as
// client-reported alloc statuses change.
func TestJobSummary_UpdateClientStatus(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)
	alloc := mock.Alloc()
	job := alloc.Job
	job.TaskGroups[0].Count = 3

	alloc2 := mock.Alloc()
	alloc2.Job = job
	alloc2.JobID = job.ID

	alloc3 := mock.Alloc()
	alloc3.Job = job
	alloc3.JobID = job.ID

	err := state.UpsertJob(1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := state.UpsertAllocs(1001, []*structs.Allocation{alloc, alloc2, alloc3}); err != nil {
		t.Fatalf("err: %v", err)
	}

	ws := memdb.NewWatchSet()
	summary, _ := state.JobSummaryByID(ws, job.Namespace, job.ID)
	if summary.Summary["web"].Starting != 3 {
		t.Fatalf("bad job summary: %v", summary)
	}

	alloc4 := mock.Alloc()
	alloc4.ID = alloc2.ID
	alloc4.Job = alloc2.Job
	alloc4.JobID = alloc2.JobID
	alloc4.ClientStatus = structs.AllocClientStatusComplete

	alloc5 := mock.Alloc()
	alloc5.ID = alloc3.ID
	alloc5.Job = alloc3.Job
	alloc5.JobID = alloc3.JobID
	alloc5.ClientStatus = structs.AllocClientStatusFailed

	alloc6 := mock.Alloc()
	alloc6.ID = alloc.ID
	alloc6.Job = alloc.Job
	alloc6.JobID = alloc.JobID
	alloc6.ClientStatus = structs.AllocClientStatusRunning

	if err := state.UpdateAllocsFromClient(1002, []*structs.Allocation{alloc4, alloc5, alloc6}); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !watchFired(ws) {
		t.Fatalf("bad")
	}

	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	if summary.Summary["web"].Running != 1 || summary.Summary["web"].Failed != 1 || summary.Summary["web"].Complete != 1 {
		t.Fatalf("bad job summary: %v", summary)
	}

	alloc7 := mock.Alloc()
	alloc7.Job = alloc.Job
	alloc7.JobID = alloc.JobID

	if err := state.UpsertAllocs(1003, []*structs.Allocation{alloc7}); err != nil {
		t.Fatalf("err: %v", err)
	}
	summary, _ = state.JobSummaryByID(ws, job.Namespace, job.ID)
	if summary.Summary["web"].Starting != 1 || summary.Summary["web"].Running != 1 || summary.Summary["web"].Failed != 1 || summary.Summary["web"].Complete != 1 {
		t.Fatalf("bad job summary: %v", summary)
	}
}

// Test that nonexistent deployment can't be updated
func TestStateStore_UpsertDeploymentStatusUpdate_Nonexistent(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	// Update the nonexistent deployment
	req := &structs.DeploymentStatusUpdateRequest{
		DeploymentUpdate: &structs.DeploymentStatusUpdate{
			DeploymentID: uuid.Generate(),
			Status:       structs.DeploymentStatusRunning,
		},
	}
	err := state.UpdateDeploymentStatus(2, req)
	if err == nil || !strings.Contains(err.Error(), "does not exist") {
		t.Fatalf("expected error updating the status because the deployment doesn't exist")
	}
}

// Test that terminal deployment can't be updated
func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	// Insert a terminal deployment
	d := mock.Deployment()
	d.Status = structs.DeploymentStatusFailed

	if err := state.UpsertDeployment(1, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Update the deployment
	req := &structs.DeploymentStatusUpdateRequest{
		DeploymentUpdate: &structs.DeploymentStatusUpdate{
			DeploymentID: d.ID,
			Status:       structs.DeploymentStatusRunning,
		},
	}
	err := state.UpdateDeploymentStatus(2, req)
	if err == nil || !strings.Contains(err.Error(), "has terminal status") {
		t.Fatalf("expected error updating the status because the deployment is terminal")
	}
}

// Test that a non terminal deployment is updated and that a job and eval are
// created.
func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	// Insert a deployment
	d := mock.Deployment()
	if err := state.UpsertDeployment(1, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Create an eval and a job
	e := mock.Eval()
	j := mock.Job()

	// Update the deployment
	status, desc := structs.DeploymentStatusFailed, "foo"
	req := &structs.DeploymentStatusUpdateRequest{
		DeploymentUpdate: &structs.DeploymentStatusUpdate{
			DeploymentID:      d.ID,
			Status:            status,
			StatusDescription: desc,
		},
		Job:  j,
		Eval: e,
	}
	err := state.UpdateDeploymentStatus(2, req)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check that the status was updated properly
	ws := memdb.NewWatchSet()
	dout, err := state.DeploymentByID(ws, d.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if dout.Status != status || dout.StatusDescription != desc {
		t.Fatalf("bad: %#v", dout)
	}

	// Check that the evaluation was created.
	// Fixed: the error was previously discarded and a stale err from the
	// earlier DeploymentByID call was re-checked.
	eout, err := state.EvalByID(ws, e.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if eout == nil {
		t.Fatalf("bad: %#v", eout)
	}

	// Check that the job was created (same stale-err fix as above)
	jout, err := state.JobByID(ws, j.Namespace, j.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if jout == nil {
		t.Fatalf("bad: %#v", jout)
	}
}

// Test that when a deployment is updated to successful the job is updated to
// stable
func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) {
	t.Parallel()

	state := testStateStore(t)

	// Insert a job
	job := mock.Job()
	if err := state.UpsertJob(1, job); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Insert a deployment
	d := structs.NewDeployment(job)
	if err := state.UpsertDeployment(2, d); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Update the deployment
	req := &structs.DeploymentStatusUpdateRequest{
		DeploymentUpdate: &structs.DeploymentStatusUpdate{
			DeploymentID:      d.ID,
			Status:            structs.DeploymentStatusSuccessful,
			StatusDescription: structs.DeploymentStatusDescriptionSuccessful,
		},
	}
	err := state.UpdateDeploymentStatus(3, req)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check that the status was updated properly
	ws := memdb.NewWatchSet()
	dout, err := state.DeploymentByID(ws, d.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if dout.Status != structs.DeploymentStatusSuccessful ||
		dout.StatusDescription != structs.DeploymentStatusDescriptionSuccessful {
		t.Fatalf("bad: %#v", dout)
	}

	// Check that the job was created.
	// Fixed: capture the lookup error instead of checking the stale err from
	// the DeploymentByID call above.
	jout, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if jout == nil {
		t.Fatalf("bad: %#v", jout)
	}
	if !jout.Stable {
		t.Fatalf("job not marked stable %#v", jout)
	}
	if jout.Version != d.JobVersion {
		t.Fatalf("job version changed; got %d; want %d", jout.Version, d.JobVersion)
	}
}
func TestStateStore_UpdateJobStability(t *testing.T) { 6749 t.Parallel() 6750 6751 state := testStateStore(t) 6752 6753 // Insert a job twice to get two versions 6754 job := mock.Job() 6755 if err := state.UpsertJob(1, job); err != nil { 6756 t.Fatalf("bad: %v", err) 6757 } 6758 6759 if err := state.UpsertJob(2, job); err != nil { 6760 t.Fatalf("bad: %v", err) 6761 } 6762 6763 // Update the stability to true 6764 err := state.UpdateJobStability(3, job.Namespace, job.ID, 0, true) 6765 if err != nil { 6766 t.Fatalf("bad: %v", err) 6767 } 6768 6769 // Check that the job was updated properly 6770 ws := memdb.NewWatchSet() 6771 jout, _ := state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0) 6772 if err != nil { 6773 t.Fatalf("bad: %v", err) 6774 } 6775 if jout == nil { 6776 t.Fatalf("bad: %#v", jout) 6777 } 6778 if !jout.Stable { 6779 t.Fatalf("job not marked stable %#v", jout) 6780 } 6781 6782 // Update the stability to false 6783 err = state.UpdateJobStability(3, job.Namespace, job.ID, 0, false) 6784 if err != nil { 6785 t.Fatalf("bad: %v", err) 6786 } 6787 6788 // Check that the job was updated properly 6789 jout, _ = state.JobByIDAndVersion(ws, job.Namespace, job.ID, 0) 6790 if err != nil { 6791 t.Fatalf("bad: %v", err) 6792 } 6793 if jout == nil { 6794 t.Fatalf("bad: %#v", jout) 6795 } 6796 if jout.Stable { 6797 t.Fatalf("job marked stable %#v", jout) 6798 } 6799 } 6800 6801 // Test that nonexistent deployment can't be promoted 6802 func TestStateStore_UpsertDeploymentPromotion_Nonexistent(t *testing.T) { 6803 t.Parallel() 6804 6805 state := testStateStore(t) 6806 6807 // Promote the nonexistent deployment 6808 req := &structs.ApplyDeploymentPromoteRequest{ 6809 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 6810 DeploymentID: uuid.Generate(), 6811 All: true, 6812 }, 6813 } 6814 err := state.UpdateDeploymentPromotion(2, req) 6815 if err == nil || !strings.Contains(err.Error(), "does not exist") { 6816 t.Fatalf("expected error promoting because the 
deployment doesn't exist") 6817 } 6818 } 6819 6820 // Test that terminal deployment can't be updated 6821 func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { 6822 t.Parallel() 6823 6824 state := testStateStore(t) 6825 6826 // Insert a terminal deployment 6827 d := mock.Deployment() 6828 d.Status = structs.DeploymentStatusFailed 6829 6830 if err := state.UpsertDeployment(1, d); err != nil { 6831 t.Fatalf("bad: %v", err) 6832 } 6833 6834 // Promote the deployment 6835 req := &structs.ApplyDeploymentPromoteRequest{ 6836 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 6837 DeploymentID: d.ID, 6838 All: true, 6839 }, 6840 } 6841 err := state.UpdateDeploymentPromotion(2, req) 6842 if err == nil || !strings.Contains(err.Error(), "has terminal status") { 6843 t.Fatalf("expected error updating the status because the deployment is terminal: %v", err) 6844 } 6845 } 6846 6847 // Test promoting unhealthy canaries in a deployment. 6848 func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { 6849 t.Parallel() 6850 6851 state := testStateStore(t) 6852 require := require.New(t) 6853 6854 // Create a job 6855 j := mock.Job() 6856 require.Nil(state.UpsertJob(1, j)) 6857 6858 // Create a deployment 6859 d := mock.Deployment() 6860 d.JobID = j.ID 6861 d.TaskGroups["web"].DesiredCanaries = 2 6862 require.Nil(state.UpsertDeployment(2, d)) 6863 6864 // Create a set of allocations 6865 c1 := mock.Alloc() 6866 c1.JobID = j.ID 6867 c1.DeploymentID = d.ID 6868 d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) 6869 c2 := mock.Alloc() 6870 c2.JobID = j.ID 6871 c2.DeploymentID = d.ID 6872 d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) 6873 6874 // Create a healthy but terminal alloc 6875 c3 := mock.Alloc() 6876 c3.JobID = j.ID 6877 c3.DeploymentID = d.ID 6878 c3.DesiredStatus = structs.AllocDesiredStatusStop 6879 c3.DeploymentStatus = 
&structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} 6880 d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) 6881 6882 require.Nil(state.UpsertAllocs(3, []*structs.Allocation{c1, c2, c3})) 6883 6884 // Promote the canaries 6885 req := &structs.ApplyDeploymentPromoteRequest{ 6886 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 6887 DeploymentID: d.ID, 6888 All: true, 6889 }, 6890 } 6891 err := state.UpdateDeploymentPromotion(4, req) 6892 require.NotNil(err) 6893 require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`) 6894 } 6895 6896 // Test promoting a deployment with no canaries 6897 func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { 6898 t.Parallel() 6899 6900 state := testStateStore(t) 6901 require := require.New(t) 6902 6903 // Create a job 6904 j := mock.Job() 6905 require.Nil(state.UpsertJob(1, j)) 6906 6907 // Create a deployment 6908 d := mock.Deployment() 6909 d.TaskGroups["web"].DesiredCanaries = 2 6910 d.JobID = j.ID 6911 require.Nil(state.UpsertDeployment(2, d)) 6912 6913 // Promote the canaries 6914 req := &structs.ApplyDeploymentPromoteRequest{ 6915 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 6916 DeploymentID: d.ID, 6917 All: true, 6918 }, 6919 } 6920 err := state.UpdateDeploymentPromotion(4, req) 6921 require.NotNil(err) 6922 require.Contains(err.Error(), `Task group "web" has 0/2 healthy allocations`) 6923 } 6924 6925 // Test promoting all canaries in a deployment. 
6926 func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { 6927 t.Parallel() 6928 6929 state := testStateStore(t) 6930 6931 // Create a job with two task groups 6932 j := mock.Job() 6933 tg1 := j.TaskGroups[0] 6934 tg2 := tg1.Copy() 6935 tg2.Name = "foo" 6936 j.TaskGroups = append(j.TaskGroups, tg2) 6937 if err := state.UpsertJob(1, j); err != nil { 6938 t.Fatalf("bad: %v", err) 6939 } 6940 6941 // Create a deployment 6942 d := mock.Deployment() 6943 d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion 6944 d.JobID = j.ID 6945 d.TaskGroups = map[string]*structs.DeploymentState{ 6946 "web": { 6947 DesiredTotal: 10, 6948 DesiredCanaries: 1, 6949 }, 6950 "foo": { 6951 DesiredTotal: 10, 6952 DesiredCanaries: 1, 6953 }, 6954 } 6955 if err := state.UpsertDeployment(2, d); err != nil { 6956 t.Fatalf("bad: %v", err) 6957 } 6958 6959 // Create a set of allocations 6960 c1 := mock.Alloc() 6961 c1.JobID = j.ID 6962 c1.DeploymentID = d.ID 6963 d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) 6964 c1.DeploymentStatus = &structs.AllocDeploymentStatus{ 6965 Healthy: helper.BoolToPtr(true), 6966 } 6967 c2 := mock.Alloc() 6968 c2.JobID = j.ID 6969 c2.DeploymentID = d.ID 6970 d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) 6971 c2.TaskGroup = tg2.Name 6972 c2.DeploymentStatus = &structs.AllocDeploymentStatus{ 6973 Healthy: helper.BoolToPtr(true), 6974 } 6975 6976 if err := state.UpsertAllocs(3, []*structs.Allocation{c1, c2}); err != nil { 6977 t.Fatalf("err: %v", err) 6978 } 6979 6980 // Create an eval 6981 e := mock.Eval() 6982 6983 // Promote the canaries 6984 req := &structs.ApplyDeploymentPromoteRequest{ 6985 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 6986 DeploymentID: d.ID, 6987 All: true, 6988 }, 6989 Eval: e, 6990 } 6991 err := state.UpdateDeploymentPromotion(4, req) 6992 if err != nil { 6993 t.Fatalf("bad: %v", err) 
6994 } 6995 6996 // Check that the status per task group was updated properly 6997 ws := memdb.NewWatchSet() 6998 dout, err := state.DeploymentByID(ws, d.ID) 6999 if err != nil { 7000 t.Fatalf("bad: %v", err) 7001 } 7002 if dout.StatusDescription != structs.DeploymentStatusDescriptionRunning { 7003 t.Fatalf("status description not updated: got %v; want %v", dout.StatusDescription, structs.DeploymentStatusDescriptionRunning) 7004 } 7005 if len(dout.TaskGroups) != 2 { 7006 t.Fatalf("bad: %#v", dout.TaskGroups) 7007 } 7008 for tg, state := range dout.TaskGroups { 7009 if !state.Promoted { 7010 t.Fatalf("bad: group %q not promoted %#v", tg, state) 7011 } 7012 } 7013 7014 // Check that the evaluation was created 7015 eout, _ := state.EvalByID(ws, e.ID) 7016 if err != nil { 7017 t.Fatalf("bad: %v", err) 7018 } 7019 if eout == nil { 7020 t.Fatalf("bad: %#v", eout) 7021 } 7022 } 7023 7024 // Test promoting a subset of canaries in a deployment. 7025 func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { 7026 t.Parallel() 7027 require := require.New(t) 7028 7029 state := testStateStore(t) 7030 7031 // Create a job with two task groups 7032 j := mock.Job() 7033 tg1 := j.TaskGroups[0] 7034 tg2 := tg1.Copy() 7035 tg2.Name = "foo" 7036 j.TaskGroups = append(j.TaskGroups, tg2) 7037 require.Nil(state.UpsertJob(1, j)) 7038 7039 // Create a deployment 7040 d := mock.Deployment() 7041 d.JobID = j.ID 7042 d.TaskGroups = map[string]*structs.DeploymentState{ 7043 "web": { 7044 DesiredTotal: 10, 7045 DesiredCanaries: 1, 7046 }, 7047 "foo": { 7048 DesiredTotal: 10, 7049 DesiredCanaries: 1, 7050 }, 7051 } 7052 require.Nil(state.UpsertDeployment(2, d)) 7053 7054 // Create a set of allocations for both groups, including an unhealthy one 7055 c1 := mock.Alloc() 7056 c1.JobID = j.ID 7057 c1.DeploymentID = d.ID 7058 d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) 7059 c1.DeploymentStatus = &structs.AllocDeploymentStatus{ 7060 
Healthy: helper.BoolToPtr(true), 7061 Canary: true, 7062 } 7063 7064 // Should still be a canary 7065 c2 := mock.Alloc() 7066 c2.JobID = j.ID 7067 c2.DeploymentID = d.ID 7068 d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) 7069 c2.TaskGroup = tg2.Name 7070 c2.DeploymentStatus = &structs.AllocDeploymentStatus{ 7071 Healthy: helper.BoolToPtr(true), 7072 Canary: true, 7073 } 7074 7075 c3 := mock.Alloc() 7076 c3.JobID = j.ID 7077 c3.DeploymentID = d.ID 7078 d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) 7079 c3.DeploymentStatus = &structs.AllocDeploymentStatus{ 7080 Healthy: helper.BoolToPtr(false), 7081 Canary: true, 7082 } 7083 7084 require.Nil(state.UpsertAllocs(3, []*structs.Allocation{c1, c2, c3})) 7085 7086 // Create an eval 7087 e := mock.Eval() 7088 7089 // Promote the canaries 7090 req := &structs.ApplyDeploymentPromoteRequest{ 7091 DeploymentPromoteRequest: structs.DeploymentPromoteRequest{ 7092 DeploymentID: d.ID, 7093 Groups: []string{"web"}, 7094 }, 7095 Eval: e, 7096 } 7097 require.Nil(state.UpdateDeploymentPromotion(4, req)) 7098 7099 // Check that the status per task group was updated properly 7100 ws := memdb.NewWatchSet() 7101 dout, err := state.DeploymentByID(ws, d.ID) 7102 require.Nil(err) 7103 require.Len(dout.TaskGroups, 2) 7104 require.Contains(dout.TaskGroups, "web") 7105 require.True(dout.TaskGroups["web"].Promoted) 7106 7107 // Check that the evaluation was created 7108 eout, err := state.EvalByID(ws, e.ID) 7109 require.Nil(err) 7110 require.NotNil(eout) 7111 7112 // Check the canary field was set properly 7113 aout1, err1 := state.AllocByID(ws, c1.ID) 7114 aout2, err2 := state.AllocByID(ws, c2.ID) 7115 aout3, err3 := state.AllocByID(ws, c3.ID) 7116 require.Nil(err1) 7117 require.Nil(err2) 7118 require.Nil(err3) 7119 require.NotNil(aout1) 7120 require.NotNil(aout2) 7121 require.NotNil(aout3) 7122 
require.False(aout1.DeploymentStatus.Canary) 7123 require.True(aout2.DeploymentStatus.Canary) 7124 require.True(aout3.DeploymentStatus.Canary) 7125 } 7126 7127 // Test that allocation health can't be set against a nonexistent deployment 7128 func TestStateStore_UpsertDeploymentAllocHealth_Nonexistent(t *testing.T) { 7129 t.Parallel() 7130 7131 state := testStateStore(t) 7132 7133 // Set health against the nonexistent deployment 7134 req := &structs.ApplyDeploymentAllocHealthRequest{ 7135 DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{ 7136 DeploymentID: uuid.Generate(), 7137 HealthyAllocationIDs: []string{uuid.Generate()}, 7138 }, 7139 } 7140 err := state.UpdateDeploymentAllocHealth(2, req) 7141 if err == nil || !strings.Contains(err.Error(), "does not exist") { 7142 t.Fatalf("expected error because the deployment doesn't exist: %v", err) 7143 } 7144 } 7145 7146 // Test that allocation health can't be set against a terminal deployment 7147 func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { 7148 t.Parallel() 7149 7150 state := testStateStore(t) 7151 7152 // Insert a terminal deployment 7153 d := mock.Deployment() 7154 d.Status = structs.DeploymentStatusFailed 7155 7156 if err := state.UpsertDeployment(1, d); err != nil { 7157 t.Fatalf("bad: %v", err) 7158 } 7159 7160 // Set health against the terminal deployment 7161 req := &structs.ApplyDeploymentAllocHealthRequest{ 7162 DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{ 7163 DeploymentID: d.ID, 7164 HealthyAllocationIDs: []string{uuid.Generate()}, 7165 }, 7166 } 7167 err := state.UpdateDeploymentAllocHealth(2, req) 7168 if err == nil || !strings.Contains(err.Error(), "has terminal status") { 7169 t.Fatalf("expected error because the deployment is terminal: %v", err) 7170 } 7171 } 7172 7173 // Test that allocation health can't be set against a nonexistent alloc 7174 func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing.T) { 7175 
t.Parallel() 7176 7177 state := testStateStore(t) 7178 7179 // Insert a deployment 7180 d := mock.Deployment() 7181 if err := state.UpsertDeployment(1, d); err != nil { 7182 t.Fatalf("bad: %v", err) 7183 } 7184 7185 // Set health against the terminal deployment 7186 req := &structs.ApplyDeploymentAllocHealthRequest{ 7187 DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{ 7188 DeploymentID: d.ID, 7189 HealthyAllocationIDs: []string{uuid.Generate()}, 7190 }, 7191 } 7192 err := state.UpdateDeploymentAllocHealth(2, req) 7193 if err == nil || !strings.Contains(err.Error(), "unknown alloc") { 7194 t.Fatalf("expected error because the alloc doesn't exist: %v", err) 7195 } 7196 } 7197 7198 // Test that a deployments PlacedCanaries is properly updated 7199 func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { 7200 t.Parallel() 7201 7202 state := testStateStore(t) 7203 7204 // Create a deployment 7205 d1 := mock.Deployment() 7206 require.NoError(t, state.UpsertDeployment(2, d1)) 7207 7208 // Create a Job 7209 job := mock.Job() 7210 require.NoError(t, state.UpsertJob(3, job)) 7211 7212 // Create alloc with canary status 7213 a := mock.Alloc() 7214 a.JobID = job.ID 7215 a.DeploymentID = d1.ID 7216 a.DeploymentStatus = &structs.AllocDeploymentStatus{ 7217 Healthy: helper.BoolToPtr(false), 7218 Canary: true, 7219 } 7220 require.NoError(t, state.UpsertAllocs(4, []*structs.Allocation{a})) 7221 7222 // Pull the deployment from state 7223 ws := memdb.NewWatchSet() 7224 deploy, err := state.DeploymentByID(ws, d1.ID) 7225 require.NoError(t, err) 7226 7227 // Ensure that PlacedCanaries is accurate 7228 require.Equal(t, 1, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) 7229 7230 // Create alloc without canary status 7231 b := mock.Alloc() 7232 b.JobID = job.ID 7233 b.DeploymentID = d1.ID 7234 b.DeploymentStatus = &structs.AllocDeploymentStatus{ 7235 Healthy: helper.BoolToPtr(false), 7236 Canary: false, 7237 } 7238 require.NoError(t, 
state.UpsertAllocs(4, []*structs.Allocation{b})) 7239 7240 // Pull the deployment from state 7241 ws = memdb.NewWatchSet() 7242 deploy, err = state.DeploymentByID(ws, d1.ID) 7243 require.NoError(t, err) 7244 7245 // Ensure that PlacedCanaries is accurate 7246 require.Equal(t, 1, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) 7247 7248 // Create a second deployment 7249 d2 := mock.Deployment() 7250 require.NoError(t, state.UpsertDeployment(5, d2)) 7251 7252 c := mock.Alloc() 7253 c.JobID = job.ID 7254 c.DeploymentID = d2.ID 7255 c.DeploymentStatus = &structs.AllocDeploymentStatus{ 7256 Healthy: helper.BoolToPtr(false), 7257 Canary: true, 7258 } 7259 require.NoError(t, state.UpsertAllocs(6, []*structs.Allocation{c})) 7260 7261 ws = memdb.NewWatchSet() 7262 deploy2, err := state.DeploymentByID(ws, d2.ID) 7263 require.NoError(t, err) 7264 7265 // Ensure that PlacedCanaries is accurate 7266 require.Equal(t, 1, len(deploy2.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) 7267 } 7268 7269 func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { 7270 t.Parallel() 7271 7272 state := testStateStore(t) 7273 7274 // Create a deployment 7275 d1 := mock.Deployment() 7276 require.NoError(t, state.UpsertDeployment(2, d1)) 7277 7278 // Create a Job 7279 job := mock.Job() 7280 require.NoError(t, state.UpsertJob(3, job)) 7281 7282 // Create alloc with canary status 7283 a := mock.Alloc() 7284 a.JobID = job.ID 7285 a.DeploymentID = d1.ID 7286 a.DeploymentStatus = &structs.AllocDeploymentStatus{ 7287 Healthy: helper.BoolToPtr(true), 7288 Canary: false, 7289 } 7290 require.NoError(t, state.UpsertAllocs(4, []*structs.Allocation{a})) 7291 7292 // Pull the deployment from state 7293 ws := memdb.NewWatchSet() 7294 deploy, err := state.DeploymentByID(ws, d1.ID) 7295 require.NoError(t, err) 7296 7297 // Ensure that PlacedCanaries is accurate 7298 require.Equal(t, 0, len(deploy.TaskGroups[job.TaskGroups[0].Name].PlacedCanaries)) 7299 } 7300 7301 // Test that 
allocation health can't be set for an alloc with mismatched 7302 // deployment ids 7303 func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *testing.T) { 7304 t.Parallel() 7305 7306 state := testStateStore(t) 7307 7308 // Insert two deployment 7309 d1 := mock.Deployment() 7310 d2 := mock.Deployment() 7311 if err := state.UpsertDeployment(1, d1); err != nil { 7312 t.Fatalf("bad: %v", err) 7313 } 7314 if err := state.UpsertDeployment(2, d2); err != nil { 7315 t.Fatalf("bad: %v", err) 7316 } 7317 7318 // Insert an alloc for a random deployment 7319 a := mock.Alloc() 7320 a.DeploymentID = d1.ID 7321 if err := state.UpsertAllocs(3, []*structs.Allocation{a}); err != nil { 7322 t.Fatalf("bad: %v", err) 7323 } 7324 7325 // Set health against the terminal deployment 7326 req := &structs.ApplyDeploymentAllocHealthRequest{ 7327 DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{ 7328 DeploymentID: d2.ID, 7329 HealthyAllocationIDs: []string{a.ID}, 7330 }, 7331 } 7332 err := state.UpdateDeploymentAllocHealth(4, req) 7333 if err == nil || !strings.Contains(err.Error(), "not part of deployment") { 7334 t.Fatalf("expected error because the alloc isn't part of the deployment: %v", err) 7335 } 7336 } 7337 7338 // Test that allocation health is properly set 7339 func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { 7340 t.Parallel() 7341 7342 state := testStateStore(t) 7343 7344 // Insert a deployment 7345 d := mock.Deployment() 7346 d.TaskGroups["web"].ProgressDeadline = 5 * time.Minute 7347 if err := state.UpsertDeployment(1, d); err != nil { 7348 t.Fatalf("bad: %v", err) 7349 } 7350 7351 // Insert two allocations 7352 a1 := mock.Alloc() 7353 a1.DeploymentID = d.ID 7354 a2 := mock.Alloc() 7355 a2.DeploymentID = d.ID 7356 if err := state.UpsertAllocs(2, []*structs.Allocation{a1, a2}); err != nil { 7357 t.Fatalf("bad: %v", err) 7358 } 7359 7360 // Create a job to roll back to 7361 j := mock.Job() 7362 7363 // Create an eval that 
should be upserted 7364 e := mock.Eval() 7365 7366 // Create a status update for the deployment 7367 status, desc := structs.DeploymentStatusFailed, "foo" 7368 u := &structs.DeploymentStatusUpdate{ 7369 DeploymentID: d.ID, 7370 Status: status, 7371 StatusDescription: desc, 7372 } 7373 7374 // Capture the time for the update 7375 ts := time.Now() 7376 7377 // Set health against the deployment 7378 req := &structs.ApplyDeploymentAllocHealthRequest{ 7379 DeploymentAllocHealthRequest: structs.DeploymentAllocHealthRequest{ 7380 DeploymentID: d.ID, 7381 HealthyAllocationIDs: []string{a1.ID}, 7382 UnhealthyAllocationIDs: []string{a2.ID}, 7383 }, 7384 Job: j, 7385 Eval: e, 7386 DeploymentUpdate: u, 7387 Timestamp: ts, 7388 } 7389 err := state.UpdateDeploymentAllocHealth(3, req) 7390 if err != nil { 7391 t.Fatalf("bad: %v", err) 7392 } 7393 7394 // Check that the status was updated properly 7395 ws := memdb.NewWatchSet() 7396 dout, err := state.DeploymentByID(ws, d.ID) 7397 if err != nil { 7398 t.Fatalf("bad: %v", err) 7399 } 7400 if dout.Status != status || dout.StatusDescription != desc { 7401 t.Fatalf("bad: %#v", dout) 7402 } 7403 7404 // Check that the evaluation was created 7405 eout, _ := state.EvalByID(ws, e.ID) 7406 if err != nil { 7407 t.Fatalf("bad: %v", err) 7408 } 7409 if eout == nil { 7410 t.Fatalf("bad: %#v", eout) 7411 } 7412 7413 // Check that the job was created 7414 jout, _ := state.JobByID(ws, j.Namespace, j.ID) 7415 if err != nil { 7416 t.Fatalf("bad: %v", err) 7417 } 7418 if jout == nil { 7419 t.Fatalf("bad: %#v", jout) 7420 } 7421 7422 // Check the status of the allocs 7423 out1, err := state.AllocByID(ws, a1.ID) 7424 if err != nil { 7425 t.Fatalf("err: %v", err) 7426 } 7427 out2, err := state.AllocByID(ws, a2.ID) 7428 if err != nil { 7429 t.Fatalf("err: %v", err) 7430 } 7431 7432 if !out1.DeploymentStatus.IsHealthy() { 7433 t.Fatalf("bad: alloc %q not healthy", out1.ID) 7434 } 7435 if !out2.DeploymentStatus.IsUnhealthy() { 7436 t.Fatalf("bad: alloc %q 
not unhealthy", out2.ID) 7437 } 7438 7439 if !out1.DeploymentStatus.Timestamp.Equal(ts) { 7440 t.Fatalf("bad: alloc %q had timestamp %v; want %v", out1.ID, out1.DeploymentStatus.Timestamp, ts) 7441 } 7442 if !out2.DeploymentStatus.Timestamp.Equal(ts) { 7443 t.Fatalf("bad: alloc %q had timestamp %v; want %v", out2.ID, out2.DeploymentStatus.Timestamp, ts) 7444 } 7445 } 7446 7447 func TestStateStore_UpsertVaultAccessors(t *testing.T) { 7448 t.Parallel() 7449 7450 state := testStateStore(t) 7451 a := mock.VaultAccessor() 7452 a2 := mock.VaultAccessor() 7453 7454 ws := memdb.NewWatchSet() 7455 if _, err := state.VaultAccessor(ws, a.Accessor); err != nil { 7456 t.Fatalf("err: %v", err) 7457 } 7458 7459 if _, err := state.VaultAccessor(ws, a2.Accessor); err != nil { 7460 t.Fatalf("err: %v", err) 7461 } 7462 7463 err := state.UpsertVaultAccessor(1000, []*structs.VaultAccessor{a, a2}) 7464 if err != nil { 7465 t.Fatalf("err: %v", err) 7466 } 7467 7468 if !watchFired(ws) { 7469 t.Fatalf("bad") 7470 } 7471 7472 ws = memdb.NewWatchSet() 7473 out, err := state.VaultAccessor(ws, a.Accessor) 7474 if err != nil { 7475 t.Fatalf("err: %v", err) 7476 } 7477 7478 if !reflect.DeepEqual(a, out) { 7479 t.Fatalf("bad: %#v %#v", a, out) 7480 } 7481 7482 out, err = state.VaultAccessor(ws, a2.Accessor) 7483 if err != nil { 7484 t.Fatalf("err: %v", err) 7485 } 7486 7487 if !reflect.DeepEqual(a2, out) { 7488 t.Fatalf("bad: %#v %#v", a2, out) 7489 } 7490 7491 iter, err := state.VaultAccessors(ws) 7492 if err != nil { 7493 t.Fatalf("err: %v", err) 7494 } 7495 7496 count := 0 7497 for { 7498 raw := iter.Next() 7499 if raw == nil { 7500 break 7501 } 7502 7503 count++ 7504 accessor := raw.(*structs.VaultAccessor) 7505 7506 if !reflect.DeepEqual(accessor, a) && !reflect.DeepEqual(accessor, a2) { 7507 t.Fatalf("bad: %#v", accessor) 7508 } 7509 } 7510 7511 if count != 2 { 7512 t.Fatalf("bad: %d", count) 7513 } 7514 7515 index, err := state.Index("vault_accessors") 7516 if err != nil { 7517 
t.Fatalf("err: %v", err) 7518 } 7519 if index != 1000 { 7520 t.Fatalf("bad: %d", index) 7521 } 7522 7523 if watchFired(ws) { 7524 t.Fatalf("bad") 7525 } 7526 } 7527 7528 func TestStateStore_DeleteVaultAccessors(t *testing.T) { 7529 t.Parallel() 7530 7531 state := testStateStore(t) 7532 a1 := mock.VaultAccessor() 7533 a2 := mock.VaultAccessor() 7534 accessors := []*structs.VaultAccessor{a1, a2} 7535 7536 err := state.UpsertVaultAccessor(1000, accessors) 7537 if err != nil { 7538 t.Fatalf("err: %v", err) 7539 } 7540 7541 ws := memdb.NewWatchSet() 7542 if _, err := state.VaultAccessor(ws, a1.Accessor); err != nil { 7543 t.Fatalf("err: %v", err) 7544 } 7545 7546 err = state.DeleteVaultAccessors(1001, accessors) 7547 if err != nil { 7548 t.Fatalf("err: %v", err) 7549 } 7550 7551 if !watchFired(ws) { 7552 t.Fatalf("bad") 7553 } 7554 7555 ws = memdb.NewWatchSet() 7556 out, err := state.VaultAccessor(ws, a1.Accessor) 7557 if err != nil { 7558 t.Fatalf("err: %v", err) 7559 } 7560 if out != nil { 7561 t.Fatalf("bad: %#v %#v", a1, out) 7562 } 7563 out, err = state.VaultAccessor(ws, a2.Accessor) 7564 if err != nil { 7565 t.Fatalf("err: %v", err) 7566 } 7567 if out != nil { 7568 t.Fatalf("bad: %#v %#v", a2, out) 7569 } 7570 7571 index, err := state.Index("vault_accessors") 7572 if err != nil { 7573 t.Fatalf("err: %v", err) 7574 } 7575 if index != 1001 { 7576 t.Fatalf("bad: %d", index) 7577 } 7578 7579 if watchFired(ws) { 7580 t.Fatalf("bad") 7581 } 7582 } 7583 7584 func TestStateStore_VaultAccessorsByAlloc(t *testing.T) { 7585 t.Parallel() 7586 7587 state := testStateStore(t) 7588 alloc := mock.Alloc() 7589 var accessors []*structs.VaultAccessor 7590 var expected []*structs.VaultAccessor 7591 7592 for i := 0; i < 5; i++ { 7593 accessor := mock.VaultAccessor() 7594 accessor.AllocID = alloc.ID 7595 expected = append(expected, accessor) 7596 accessors = append(accessors, accessor) 7597 } 7598 7599 for i := 0; i < 10; i++ { 7600 accessor := mock.VaultAccessor() 7601 accessors = 
append(accessors, accessor) 7602 } 7603 7604 err := state.UpsertVaultAccessor(1000, accessors) 7605 if err != nil { 7606 t.Fatalf("err: %v", err) 7607 } 7608 7609 ws := memdb.NewWatchSet() 7610 out, err := state.VaultAccessorsByAlloc(ws, alloc.ID) 7611 if err != nil { 7612 t.Fatalf("err: %v", err) 7613 } 7614 7615 if len(expected) != len(out) { 7616 t.Fatalf("bad: %#v %#v", len(expected), len(out)) 7617 } 7618 7619 index, err := state.Index("vault_accessors") 7620 if err != nil { 7621 t.Fatalf("err: %v", err) 7622 } 7623 if index != 1000 { 7624 t.Fatalf("bad: %d", index) 7625 } 7626 7627 if watchFired(ws) { 7628 t.Fatalf("bad") 7629 } 7630 } 7631 7632 func TestStateStore_VaultAccessorsByNode(t *testing.T) { 7633 t.Parallel() 7634 7635 state := testStateStore(t) 7636 node := mock.Node() 7637 var accessors []*structs.VaultAccessor 7638 var expected []*structs.VaultAccessor 7639 7640 for i := 0; i < 5; i++ { 7641 accessor := mock.VaultAccessor() 7642 accessor.NodeID = node.ID 7643 expected = append(expected, accessor) 7644 accessors = append(accessors, accessor) 7645 } 7646 7647 for i := 0; i < 10; i++ { 7648 accessor := mock.VaultAccessor() 7649 accessors = append(accessors, accessor) 7650 } 7651 7652 err := state.UpsertVaultAccessor(1000, accessors) 7653 if err != nil { 7654 t.Fatalf("err: %v", err) 7655 } 7656 7657 ws := memdb.NewWatchSet() 7658 out, err := state.VaultAccessorsByNode(ws, node.ID) 7659 if err != nil { 7660 t.Fatalf("err: %v", err) 7661 } 7662 7663 if len(expected) != len(out) { 7664 t.Fatalf("bad: %#v %#v", len(expected), len(out)) 7665 } 7666 7667 index, err := state.Index("vault_accessors") 7668 if err != nil { 7669 t.Fatalf("err: %v", err) 7670 } 7671 if index != 1000 { 7672 t.Fatalf("bad: %d", index) 7673 } 7674 7675 if watchFired(ws) { 7676 t.Fatalf("bad") 7677 } 7678 } 7679 7680 func TestStateStore_RestoreVaultAccessor(t *testing.T) { 7681 t.Parallel() 7682 7683 state := testStateStore(t) 7684 a := mock.VaultAccessor() 7685 7686 restore, err 
:= state.Restore() 7687 if err != nil { 7688 t.Fatalf("err: %v", err) 7689 } 7690 7691 err = restore.VaultAccessorRestore(a) 7692 if err != nil { 7693 t.Fatalf("err: %v", err) 7694 } 7695 restore.Commit() 7696 7697 ws := memdb.NewWatchSet() 7698 out, err := state.VaultAccessor(ws, a.Accessor) 7699 if err != nil { 7700 t.Fatalf("err: %v", err) 7701 } 7702 7703 if !reflect.DeepEqual(out, a) { 7704 t.Fatalf("Bad: %#v %#v", out, a) 7705 } 7706 7707 if watchFired(ws) { 7708 t.Fatalf("bad") 7709 } 7710 } 7711 7712 func TestStateStore_UpsertSITokenAccessors(t *testing.T) { 7713 t.Parallel() 7714 r := require.New(t) 7715 7716 state := testStateStore(t) 7717 a1 := mock.SITokenAccessor() 7718 a2 := mock.SITokenAccessor() 7719 7720 ws := memdb.NewWatchSet() 7721 var err error 7722 7723 _, err = state.SITokenAccessor(ws, a1.AccessorID) 7724 r.NoError(err) 7725 7726 _, err = state.SITokenAccessor(ws, a2.AccessorID) 7727 r.NoError(err) 7728 7729 err = state.UpsertSITokenAccessors(1000, []*structs.SITokenAccessor{a1, a2}) 7730 r.NoError(err) 7731 7732 wsFired := watchFired(ws) 7733 r.True(wsFired) 7734 7735 noInsertWS := memdb.NewWatchSet() 7736 result1, err := state.SITokenAccessor(noInsertWS, a1.AccessorID) 7737 r.NoError(err) 7738 r.Equal(a1, result1) 7739 7740 result2, err := state.SITokenAccessor(noInsertWS, a2.AccessorID) 7741 r.NoError(err) 7742 r.Equal(a2, result2) 7743 7744 iter, err := state.SITokenAccessors(noInsertWS) 7745 r.NoError(err) 7746 7747 count := 0 7748 for raw := iter.Next(); raw != nil; raw = iter.Next() { 7749 count++ 7750 accessor := raw.(*structs.SITokenAccessor) 7751 // iterator is sorted by dynamic UUID 7752 matches := reflect.DeepEqual(a1, accessor) || reflect.DeepEqual(a2, accessor) 7753 r.True(matches) 7754 } 7755 r.Equal(2, count) 7756 7757 index, err := state.Index(siTokenAccessorTable) 7758 r.NoError(err) 7759 r.Equal(uint64(1000), index) 7760 7761 noInsertWSFired := watchFired(noInsertWS) 7762 r.False(noInsertWSFired) 7763 } 7764 7765 func 
TestStateStore_DeleteSITokenAccessors(t *testing.T) { 7766 t.Parallel() 7767 r := require.New(t) 7768 7769 state := testStateStore(t) 7770 a1 := mock.SITokenAccessor() 7771 a2 := mock.SITokenAccessor() 7772 accessors := []*structs.SITokenAccessor{a1, a2} 7773 var err error 7774 7775 err = state.UpsertSITokenAccessors(1000, accessors) 7776 r.NoError(err) 7777 7778 ws := memdb.NewWatchSet() 7779 _, err = state.SITokenAccessor(ws, a1.AccessorID) 7780 r.NoError(err) 7781 7782 err = state.DeleteSITokenAccessors(1001, accessors) 7783 r.NoError(err) 7784 7785 wsFired := watchFired(ws) 7786 r.True(wsFired) 7787 7788 wsPostDelete := memdb.NewWatchSet() 7789 7790 result1, err := state.SITokenAccessor(wsPostDelete, a1.AccessorID) 7791 r.NoError(err) 7792 r.Nil(result1) // was deleted 7793 7794 result2, err := state.SITokenAccessor(wsPostDelete, a2.AccessorID) 7795 r.NoError(err) 7796 r.Nil(result2) // was deleted 7797 7798 index, err := state.Index(siTokenAccessorTable) 7799 r.NoError(err) 7800 r.Equal(uint64(1001), index) 7801 7802 wsPostDeleteFired := watchFired(wsPostDelete) 7803 r.False(wsPostDeleteFired) 7804 } 7805 7806 func TestStateStore_SITokenAccessorsByAlloc(t *testing.T) { 7807 t.Parallel() 7808 r := require.New(t) 7809 7810 state := testStateStore(t) 7811 alloc := mock.Alloc() 7812 var accessors []*structs.SITokenAccessor 7813 var expected []*structs.SITokenAccessor 7814 7815 for i := 0; i < 5; i++ { 7816 accessor := mock.SITokenAccessor() 7817 accessor.AllocID = alloc.ID 7818 expected = append(expected, accessor) 7819 accessors = append(accessors, accessor) 7820 } 7821 7822 for i := 0; i < 10; i++ { 7823 accessor := mock.SITokenAccessor() 7824 accessor.AllocID = uuid.Generate() // does not belong to alloc 7825 accessors = append(accessors, accessor) 7826 } 7827 7828 err := state.UpsertSITokenAccessors(1000, accessors) 7829 r.NoError(err) 7830 7831 ws := memdb.NewWatchSet() 7832 result, err := state.SITokenAccessorsByAlloc(ws, alloc.ID) 7833 r.NoError(err) 7834 
r.ElementsMatch(expected, result) 7835 7836 index, err := state.Index(siTokenAccessorTable) 7837 r.NoError(err) 7838 r.Equal(uint64(1000), index) 7839 7840 wsFired := watchFired(ws) 7841 r.False(wsFired) 7842 } 7843 7844 func TestStateStore_SITokenAccessorsByNode(t *testing.T) { 7845 t.Parallel() 7846 r := require.New(t) 7847 7848 state := testStateStore(t) 7849 node := mock.Node() 7850 var accessors []*structs.SITokenAccessor 7851 var expected []*structs.SITokenAccessor 7852 var err error 7853 7854 for i := 0; i < 5; i++ { 7855 accessor := mock.SITokenAccessor() 7856 accessor.NodeID = node.ID 7857 expected = append(expected, accessor) 7858 accessors = append(accessors, accessor) 7859 } 7860 7861 for i := 0; i < 10; i++ { 7862 accessor := mock.SITokenAccessor() 7863 accessor.NodeID = uuid.Generate() // does not belong to node 7864 accessors = append(accessors, accessor) 7865 } 7866 7867 err = state.UpsertSITokenAccessors(1000, accessors) 7868 r.NoError(err) 7869 7870 ws := memdb.NewWatchSet() 7871 result, err := state.SITokenAccessorsByNode(ws, node.ID) 7872 r.NoError(err) 7873 r.ElementsMatch(expected, result) 7874 7875 index, err := state.Index(siTokenAccessorTable) 7876 r.NoError(err) 7877 r.Equal(uint64(1000), index) 7878 7879 wsFired := watchFired(ws) 7880 r.False(wsFired) 7881 } 7882 7883 func TestStateStore_RestoreSITokenAccessor(t *testing.T) { 7884 t.Parallel() 7885 r := require.New(t) 7886 7887 state := testStateStore(t) 7888 a1 := mock.SITokenAccessor() 7889 7890 restore, err := state.Restore() 7891 r.NoError(err) 7892 7893 err = restore.SITokenAccessorRestore(a1) 7894 r.NoError(err) 7895 7896 restore.Commit() 7897 7898 ws := memdb.NewWatchSet() 7899 result, err := state.SITokenAccessor(ws, a1.AccessorID) 7900 r.NoError(err) 7901 r.Equal(a1, result) 7902 7903 wsFired := watchFired(ws) 7904 r.False(wsFired) 7905 } 7906 7907 func TestStateStore_UpsertACLPolicy(t *testing.T) { 7908 t.Parallel() 7909 7910 state := testStateStore(t) 7911 policy := 
mock.ACLPolicy() 7912 policy2 := mock.ACLPolicy() 7913 7914 ws := memdb.NewWatchSet() 7915 if _, err := state.ACLPolicyByName(ws, policy.Name); err != nil { 7916 t.Fatalf("err: %v", err) 7917 } 7918 if _, err := state.ACLPolicyByName(ws, policy2.Name); err != nil { 7919 t.Fatalf("err: %v", err) 7920 } 7921 7922 if err := state.UpsertACLPolicies(1000, 7923 []*structs.ACLPolicy{policy, policy2}); err != nil { 7924 t.Fatalf("err: %v", err) 7925 } 7926 if !watchFired(ws) { 7927 t.Fatalf("bad") 7928 } 7929 7930 ws = memdb.NewWatchSet() 7931 out, err := state.ACLPolicyByName(ws, policy.Name) 7932 assert.Equal(t, nil, err) 7933 assert.Equal(t, policy, out) 7934 7935 out, err = state.ACLPolicyByName(ws, policy2.Name) 7936 assert.Equal(t, nil, err) 7937 assert.Equal(t, policy2, out) 7938 7939 iter, err := state.ACLPolicies(ws) 7940 if err != nil { 7941 t.Fatalf("err: %v", err) 7942 } 7943 7944 // Ensure we see both policies 7945 count := 0 7946 for { 7947 raw := iter.Next() 7948 if raw == nil { 7949 break 7950 } 7951 count++ 7952 } 7953 if count != 2 { 7954 t.Fatalf("bad: %d", count) 7955 } 7956 7957 index, err := state.Index("acl_policy") 7958 if err != nil { 7959 t.Fatalf("err: %v", err) 7960 } 7961 if index != 1000 { 7962 t.Fatalf("bad: %d", index) 7963 } 7964 7965 if watchFired(ws) { 7966 t.Fatalf("bad") 7967 } 7968 } 7969 7970 func TestStateStore_DeleteACLPolicy(t *testing.T) { 7971 t.Parallel() 7972 7973 state := testStateStore(t) 7974 policy := mock.ACLPolicy() 7975 policy2 := mock.ACLPolicy() 7976 7977 // Create the policy 7978 if err := state.UpsertACLPolicies(1000, 7979 []*structs.ACLPolicy{policy, policy2}); err != nil { 7980 t.Fatalf("err: %v", err) 7981 } 7982 7983 // Create a watcher 7984 ws := memdb.NewWatchSet() 7985 if _, err := state.ACLPolicyByName(ws, policy.Name); err != nil { 7986 t.Fatalf("err: %v", err) 7987 } 7988 7989 // Delete the policy 7990 if err := state.DeleteACLPolicies(1001, 7991 []string{policy.Name, policy2.Name}); err != nil { 7992 
t.Fatalf("err: %v", err) 7993 } 7994 7995 // Ensure watching triggered 7996 if !watchFired(ws) { 7997 t.Fatalf("bad") 7998 } 7999 8000 // Ensure we don't get the object back 8001 ws = memdb.NewWatchSet() 8002 out, err := state.ACLPolicyByName(ws, policy.Name) 8003 assert.Equal(t, nil, err) 8004 if out != nil { 8005 t.Fatalf("bad: %#v", out) 8006 } 8007 8008 iter, err := state.ACLPolicies(ws) 8009 if err != nil { 8010 t.Fatalf("err: %v", err) 8011 } 8012 8013 // Ensure we see neither policy 8014 count := 0 8015 for { 8016 raw := iter.Next() 8017 if raw == nil { 8018 break 8019 } 8020 count++ 8021 } 8022 if count != 0 { 8023 t.Fatalf("bad: %d", count) 8024 } 8025 8026 index, err := state.Index("acl_policy") 8027 if err != nil { 8028 t.Fatalf("err: %v", err) 8029 } 8030 if index != 1001 { 8031 t.Fatalf("bad: %d", index) 8032 } 8033 8034 if watchFired(ws) { 8035 t.Fatalf("bad") 8036 } 8037 } 8038 8039 func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { 8040 t.Parallel() 8041 8042 state := testStateStore(t) 8043 names := []string{ 8044 "foo", 8045 "bar", 8046 "foobar", 8047 "foozip", 8048 "zip", 8049 } 8050 8051 // Create the policies 8052 var baseIndex uint64 = 1000 8053 for _, name := range names { 8054 p := mock.ACLPolicy() 8055 p.Name = name 8056 if err := state.UpsertACLPolicies(baseIndex, []*structs.ACLPolicy{p}); err != nil { 8057 t.Fatalf("err: %v", err) 8058 } 8059 baseIndex++ 8060 } 8061 8062 // Scan by prefix 8063 iter, err := state.ACLPolicyByNamePrefix(nil, "foo") 8064 if err != nil { 8065 t.Fatalf("err: %v", err) 8066 } 8067 8068 // Ensure we see both policies 8069 count := 0 8070 out := []string{} 8071 for { 8072 raw := iter.Next() 8073 if raw == nil { 8074 break 8075 } 8076 count++ 8077 out = append(out, raw.(*structs.ACLPolicy).Name) 8078 } 8079 if count != 3 { 8080 t.Fatalf("bad: %d %v", count, out) 8081 } 8082 sort.Strings(out) 8083 8084 expect := []string{"foo", "foobar", "foozip"} 8085 assert.Equal(t, expect, out) 8086 } 8087 8088 func 
TestStateStore_BootstrapACLTokens(t *testing.T) { 8089 t.Parallel() 8090 8091 state := testStateStore(t) 8092 tk1 := mock.ACLToken() 8093 tk2 := mock.ACLToken() 8094 8095 ok, resetIdx, err := state.CanBootstrapACLToken() 8096 assert.Nil(t, err) 8097 assert.Equal(t, true, ok) 8098 assert.EqualValues(t, 0, resetIdx) 8099 8100 if err := state.BootstrapACLTokens(1000, 0, tk1); err != nil { 8101 t.Fatalf("err: %v", err) 8102 } 8103 8104 out, err := state.ACLTokenByAccessorID(nil, tk1.AccessorID) 8105 assert.Equal(t, nil, err) 8106 assert.Equal(t, tk1, out) 8107 8108 ok, resetIdx, err = state.CanBootstrapACLToken() 8109 assert.Nil(t, err) 8110 assert.Equal(t, false, ok) 8111 assert.EqualValues(t, 1000, resetIdx) 8112 8113 if err := state.BootstrapACLTokens(1001, 0, tk2); err == nil { 8114 t.Fatalf("expected error") 8115 } 8116 8117 iter, err := state.ACLTokens(nil) 8118 if err != nil { 8119 t.Fatalf("err: %v", err) 8120 } 8121 8122 // Ensure we see both policies 8123 count := 0 8124 for { 8125 raw := iter.Next() 8126 if raw == nil { 8127 break 8128 } 8129 count++ 8130 } 8131 if count != 1 { 8132 t.Fatalf("bad: %d", count) 8133 } 8134 8135 index, err := state.Index("acl_token") 8136 if err != nil { 8137 t.Fatalf("err: %v", err) 8138 } 8139 if index != 1000 { 8140 t.Fatalf("bad: %d", index) 8141 } 8142 index, err = state.Index("acl_token_bootstrap") 8143 if err != nil { 8144 t.Fatalf("err: %v", err) 8145 } 8146 if index != 1000 { 8147 t.Fatalf("bad: %d", index) 8148 } 8149 8150 // Should allow bootstrap with reset index 8151 if err := state.BootstrapACLTokens(1001, 1000, tk2); err != nil { 8152 t.Fatalf("err %v", err) 8153 } 8154 8155 // Check we've modified the index 8156 index, err = state.Index("acl_token") 8157 if err != nil { 8158 t.Fatalf("err: %v", err) 8159 } 8160 if index != 1001 { 8161 t.Fatalf("bad: %d", index) 8162 } 8163 index, err = state.Index("acl_token_bootstrap") 8164 if err != nil { 8165 t.Fatalf("err: %v", err) 8166 } 8167 if index != 1001 { 8168 
t.Fatalf("bad: %d", index) 8169 } 8170 } 8171 8172 func TestStateStore_UpsertACLTokens(t *testing.T) { 8173 t.Parallel() 8174 8175 state := testStateStore(t) 8176 tk1 := mock.ACLToken() 8177 tk2 := mock.ACLToken() 8178 8179 ws := memdb.NewWatchSet() 8180 if _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID); err != nil { 8181 t.Fatalf("err: %v", err) 8182 } 8183 if _, err := state.ACLTokenByAccessorID(ws, tk2.AccessorID); err != nil { 8184 t.Fatalf("err: %v", err) 8185 } 8186 8187 if err := state.UpsertACLTokens(1000, 8188 []*structs.ACLToken{tk1, tk2}); err != nil { 8189 t.Fatalf("err: %v", err) 8190 } 8191 if !watchFired(ws) { 8192 t.Fatalf("bad") 8193 } 8194 8195 ws = memdb.NewWatchSet() 8196 out, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID) 8197 assert.Equal(t, nil, err) 8198 assert.Equal(t, tk1, out) 8199 8200 out, err = state.ACLTokenByAccessorID(ws, tk2.AccessorID) 8201 assert.Equal(t, nil, err) 8202 assert.Equal(t, tk2, out) 8203 8204 out, err = state.ACLTokenBySecretID(ws, tk1.SecretID) 8205 assert.Equal(t, nil, err) 8206 assert.Equal(t, tk1, out) 8207 8208 out, err = state.ACLTokenBySecretID(ws, tk2.SecretID) 8209 assert.Equal(t, nil, err) 8210 assert.Equal(t, tk2, out) 8211 8212 iter, err := state.ACLTokens(ws) 8213 if err != nil { 8214 t.Fatalf("err: %v", err) 8215 } 8216 8217 // Ensure we see both policies 8218 count := 0 8219 for { 8220 raw := iter.Next() 8221 if raw == nil { 8222 break 8223 } 8224 count++ 8225 } 8226 if count != 2 { 8227 t.Fatalf("bad: %d", count) 8228 } 8229 8230 index, err := state.Index("acl_token") 8231 if err != nil { 8232 t.Fatalf("err: %v", err) 8233 } 8234 if index != 1000 { 8235 t.Fatalf("bad: %d", index) 8236 } 8237 8238 if watchFired(ws) { 8239 t.Fatalf("bad") 8240 } 8241 } 8242 8243 func TestStateStore_DeleteACLTokens(t *testing.T) { 8244 t.Parallel() 8245 8246 state := testStateStore(t) 8247 tk1 := mock.ACLToken() 8248 tk2 := mock.ACLToken() 8249 8250 // Create the tokens 8251 if err := 
state.UpsertACLTokens(1000, 8252 []*structs.ACLToken{tk1, tk2}); err != nil { 8253 t.Fatalf("err: %v", err) 8254 } 8255 8256 // Create a watcher 8257 ws := memdb.NewWatchSet() 8258 if _, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID); err != nil { 8259 t.Fatalf("err: %v", err) 8260 } 8261 8262 // Delete the token 8263 if err := state.DeleteACLTokens(1001, 8264 []string{tk1.AccessorID, tk2.AccessorID}); err != nil { 8265 t.Fatalf("err: %v", err) 8266 } 8267 8268 // Ensure watching triggered 8269 if !watchFired(ws) { 8270 t.Fatalf("bad") 8271 } 8272 8273 // Ensure we don't get the object back 8274 ws = memdb.NewWatchSet() 8275 out, err := state.ACLTokenByAccessorID(ws, tk1.AccessorID) 8276 assert.Equal(t, nil, err) 8277 if out != nil { 8278 t.Fatalf("bad: %#v", out) 8279 } 8280 8281 iter, err := state.ACLTokens(ws) 8282 if err != nil { 8283 t.Fatalf("err: %v", err) 8284 } 8285 8286 // Ensure we see both policies 8287 count := 0 8288 for { 8289 raw := iter.Next() 8290 if raw == nil { 8291 break 8292 } 8293 count++ 8294 } 8295 if count != 0 { 8296 t.Fatalf("bad: %d", count) 8297 } 8298 8299 index, err := state.Index("acl_token") 8300 if err != nil { 8301 t.Fatalf("err: %v", err) 8302 } 8303 if index != 1001 { 8304 t.Fatalf("bad: %d", index) 8305 } 8306 8307 if watchFired(ws) { 8308 t.Fatalf("bad") 8309 } 8310 } 8311 8312 func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { 8313 t.Parallel() 8314 8315 state := testStateStore(t) 8316 prefixes := []string{ 8317 "aaaa", 8318 "aabb", 8319 "bbbb", 8320 "bbcc", 8321 "ffff", 8322 } 8323 8324 // Create the tokens 8325 var baseIndex uint64 = 1000 8326 for _, prefix := range prefixes { 8327 tk := mock.ACLToken() 8328 tk.AccessorID = prefix + tk.AccessorID[4:] 8329 if err := state.UpsertACLTokens(baseIndex, []*structs.ACLToken{tk}); err != nil { 8330 t.Fatalf("err: %v", err) 8331 } 8332 baseIndex++ 8333 } 8334 8335 // Scan by prefix 8336 iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa") 8337 if err != nil { 
8338 t.Fatalf("err: %v", err) 8339 } 8340 8341 // Ensure we see both tokens 8342 count := 0 8343 out := []string{} 8344 for { 8345 raw := iter.Next() 8346 if raw == nil { 8347 break 8348 } 8349 count++ 8350 out = append(out, raw.(*structs.ACLToken).AccessorID[:4]) 8351 } 8352 if count != 2 { 8353 t.Fatalf("bad: %d %v", count, out) 8354 } 8355 sort.Strings(out) 8356 8357 expect := []string{"aaaa", "aabb"} 8358 assert.Equal(t, expect, out) 8359 } 8360 8361 func TestStateStore_RestoreACLPolicy(t *testing.T) { 8362 t.Parallel() 8363 8364 state := testStateStore(t) 8365 policy := mock.ACLPolicy() 8366 8367 restore, err := state.Restore() 8368 if err != nil { 8369 t.Fatalf("err: %v", err) 8370 } 8371 8372 err = restore.ACLPolicyRestore(policy) 8373 if err != nil { 8374 t.Fatalf("err: %v", err) 8375 } 8376 restore.Commit() 8377 8378 ws := memdb.NewWatchSet() 8379 out, err := state.ACLPolicyByName(ws, policy.Name) 8380 if err != nil { 8381 t.Fatalf("err: %v", err) 8382 } 8383 assert.Equal(t, policy, out) 8384 } 8385 8386 func TestStateStore_ACLTokensByGlobal(t *testing.T) { 8387 t.Parallel() 8388 8389 state := testStateStore(t) 8390 tk1 := mock.ACLToken() 8391 tk2 := mock.ACLToken() 8392 tk3 := mock.ACLToken() 8393 tk4 := mock.ACLToken() 8394 tk3.Global = true 8395 8396 if err := state.UpsertACLTokens(1000, 8397 []*structs.ACLToken{tk1, tk2, tk3, tk4}); err != nil { 8398 t.Fatalf("err: %v", err) 8399 } 8400 8401 iter, err := state.ACLTokensByGlobal(nil, true) 8402 if err != nil { 8403 t.Fatalf("err: %v", err) 8404 } 8405 8406 // Ensure we see the one global policies 8407 count := 0 8408 for { 8409 raw := iter.Next() 8410 if raw == nil { 8411 break 8412 } 8413 count++ 8414 } 8415 if count != 1 { 8416 t.Fatalf("bad: %d", count) 8417 } 8418 } 8419 8420 func TestStateStore_RestoreACLToken(t *testing.T) { 8421 t.Parallel() 8422 8423 state := testStateStore(t) 8424 token := mock.ACLToken() 8425 8426 restore, err := state.Restore() 8427 if err != nil { 8428 t.Fatalf("err: %v", 
err) 8429 } 8430 8431 err = restore.ACLTokenRestore(token) 8432 if err != nil { 8433 t.Fatalf("err: %v", err) 8434 } 8435 restore.Commit() 8436 8437 ws := memdb.NewWatchSet() 8438 out, err := state.ACLTokenByAccessorID(ws, token.AccessorID) 8439 if err != nil { 8440 t.Fatalf("err: %v", err) 8441 } 8442 assert.Equal(t, token, out) 8443 } 8444 8445 func TestStateStore_SchedulerConfig(t *testing.T) { 8446 t.Parallel() 8447 8448 state := testStateStore(t) 8449 schedConfig := &structs.SchedulerConfiguration{ 8450 PreemptionConfig: structs.PreemptionConfig{ 8451 SystemSchedulerEnabled: false, 8452 }, 8453 CreateIndex: 100, 8454 ModifyIndex: 200, 8455 } 8456 8457 require := require.New(t) 8458 restore, err := state.Restore() 8459 require.Nil(err) 8460 8461 err = restore.SchedulerConfigRestore(schedConfig) 8462 require.Nil(err) 8463 8464 restore.Commit() 8465 8466 modIndex, out, err := state.SchedulerConfig() 8467 require.Nil(err) 8468 require.Equal(schedConfig.ModifyIndex, modIndex) 8469 8470 require.Equal(schedConfig, out) 8471 } 8472 8473 func TestStateStore_ClusterMetadata(t *testing.T) { 8474 require := require.New(t) 8475 8476 state := testStateStore(t) 8477 clusterID := "12345678-1234-1234-1234-1234567890" 8478 now := time.Now().UnixNano() 8479 meta := &structs.ClusterMetadata{ClusterID: clusterID, CreateTime: now} 8480 8481 err := state.ClusterSetMetadata(100, meta) 8482 require.NoError(err) 8483 8484 result, err := state.ClusterMetadata() 8485 require.NoError(err) 8486 require.Equal(clusterID, result.ClusterID) 8487 require.Equal(now, result.CreateTime) 8488 } 8489 8490 func TestStateStore_ClusterMetadataRestore(t *testing.T) { 8491 require := require.New(t) 8492 8493 state := testStateStore(t) 8494 clusterID := "12345678-1234-1234-1234-1234567890" 8495 now := time.Now().UnixNano() 8496 meta := &structs.ClusterMetadata{ClusterID: clusterID, CreateTime: now} 8497 8498 restore, err := state.Restore() 8499 require.NoError(err) 8500 8501 err = 
restore.ClusterMetadataRestore(meta) 8502 require.NoError(err) 8503 8504 restore.Commit() 8505 8506 out, err := state.ClusterMetadata() 8507 require.NoError(err) 8508 require.Equal(clusterID, out.ClusterID) 8509 require.Equal(now, out.CreateTime) 8510 } 8511 8512 func TestStateStore_RestoreScalingPolicy(t *testing.T) { 8513 t.Parallel() 8514 require := require.New(t) 8515 8516 state := testStateStore(t) 8517 scalingPolicy := mock.ScalingPolicy() 8518 8519 restore, err := state.Restore() 8520 require.NoError(err) 8521 8522 err = restore.ScalingPolicyRestore(scalingPolicy) 8523 require.NoError(err) 8524 restore.Commit() 8525 8526 ws := memdb.NewWatchSet() 8527 out, err := state.ScalingPolicyByID(ws, scalingPolicy.ID) 8528 require.NoError(err) 8529 require.EqualValues(out, scalingPolicy) 8530 } 8531 8532 func TestStateStore_UpsertScalingPolicy(t *testing.T) { 8533 t.Parallel() 8534 require := require.New(t) 8535 8536 state := testStateStore(t) 8537 policy := mock.ScalingPolicy() 8538 policy2 := mock.ScalingPolicy() 8539 8540 wsAll := memdb.NewWatchSet() 8541 all, err := state.ScalingPolicies(wsAll) 8542 require.NoError(err) 8543 require.Nil(all.Next()) 8544 8545 ws := memdb.NewWatchSet() 8546 out, err := state.ScalingPolicyByTarget(ws, policy.Target) 8547 require.NoError(err) 8548 require.Nil(out) 8549 8550 out, err = state.ScalingPolicyByTarget(ws, policy2.Target) 8551 require.NoError(err) 8552 require.Nil(out) 8553 8554 err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2}) 8555 require.NoError(err) 8556 require.True(watchFired(ws)) 8557 require.True(watchFired(wsAll)) 8558 8559 ws = memdb.NewWatchSet() 8560 out, err = state.ScalingPolicyByTarget(ws, policy.Target) 8561 require.NoError(err) 8562 require.Equal(policy, out) 8563 8564 out, err = state.ScalingPolicyByTarget(ws, policy2.Target) 8565 require.NoError(err) 8566 require.Equal(policy2, out) 8567 8568 iter, err := state.ScalingPolicies(ws) 8569 require.NoError(err) 8570 8571 // 
Ensure we see both policies 8572 count := 0 8573 for { 8574 raw := iter.Next() 8575 if raw == nil { 8576 break 8577 } 8578 count++ 8579 } 8580 require.Equal(2, count) 8581 8582 index, err := state.Index("scaling_policy") 8583 require.NoError(err) 8584 require.True(1000 == index) 8585 require.False(watchFired(ws)) 8586 } 8587 8588 func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { 8589 t.Parallel() 8590 require := require.New(t) 8591 8592 otherNamespace := "not-default-namespace" 8593 state := testStateStore(t) 8594 policy := mock.ScalingPolicy() 8595 policy2 := mock.ScalingPolicy() 8596 policy2.Target[structs.ScalingTargetNamespace] = otherNamespace 8597 8598 ws1 := memdb.NewWatchSet() 8599 iter, err := state.ScalingPoliciesByNamespace(ws1, structs.DefaultNamespace) 8600 require.NoError(err) 8601 require.Nil(iter.Next()) 8602 8603 ws2 := memdb.NewWatchSet() 8604 iter, err = state.ScalingPoliciesByNamespace(ws2, otherNamespace) 8605 require.NoError(err) 8606 require.Nil(iter.Next()) 8607 8608 err = state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2}) 8609 require.NoError(err) 8610 require.True(watchFired(ws1)) 8611 require.True(watchFired(ws2)) 8612 8613 iter, err = state.ScalingPoliciesByNamespace(nil, structs.DefaultNamespace) 8614 require.NoError(err) 8615 policiesInDefaultNamespace := []string{} 8616 for { 8617 raw := iter.Next() 8618 if raw == nil { 8619 break 8620 } 8621 policiesInDefaultNamespace = append(policiesInDefaultNamespace, raw.(*structs.ScalingPolicy).ID) 8622 } 8623 require.ElementsMatch([]string{policy.ID}, policiesInDefaultNamespace) 8624 8625 iter, err = state.ScalingPoliciesByNamespace(nil, otherNamespace) 8626 require.NoError(err) 8627 policiesInOtherNamespace := []string{} 8628 for { 8629 raw := iter.Next() 8630 if raw == nil { 8631 break 8632 } 8633 policiesInOtherNamespace = append(policiesInOtherNamespace, raw.(*structs.ScalingPolicy).ID) 8634 } 8635 require.ElementsMatch([]string{policy2.ID}, 
policiesInOtherNamespace) 8636 } 8637 8638 func TestStateStore_UpsertJob_UpsertScalingPolicies(t *testing.T) { 8639 t.Parallel() 8640 8641 require := require.New(t) 8642 8643 state := testStateStore(t) 8644 job, policy := mock.JobWithScalingPolicy() 8645 8646 // Create a watchset so we can test that upsert fires the watch 8647 ws := memdb.NewWatchSet() 8648 out, err := state.ScalingPolicyByTarget(ws, policy.Target) 8649 require.NoError(err) 8650 require.Nil(out) 8651 8652 var newIndex uint64 = 1000 8653 err = state.UpsertJob(newIndex, job) 8654 require.NoError(err) 8655 require.True(watchFired(ws), "watch did not fire") 8656 8657 ws = memdb.NewWatchSet() 8658 out, err = state.ScalingPolicyByTarget(ws, policy.Target) 8659 require.NoError(err) 8660 require.NotNil(out) 8661 require.Equal(newIndex, out.CreateIndex) 8662 require.Equal(newIndex, out.ModifyIndex) 8663 8664 index, err := state.Index("scaling_policy") 8665 require.Equal(newIndex, index) 8666 } 8667 8668 // Scaling Policy IDs are generated randomly during Job.Register 8669 // Subsequent updates of the job should preserve the ID for the scaling policy 8670 // associated with a given target. 
8671 func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { 8672 t.Parallel() 8673 8674 require := require.New(t) 8675 8676 state := testStateStore(t) 8677 job, policy := mock.JobWithScalingPolicy() 8678 8679 var newIndex uint64 = 1000 8680 err := state.UpsertJob(newIndex, job) 8681 require.NoError(err) 8682 8683 ws := memdb.NewWatchSet() 8684 p1, err := state.ScalingPolicyByTarget(ws, policy.Target) 8685 require.NoError(err) 8686 require.NotNil(p1) 8687 require.Equal(newIndex, p1.CreateIndex) 8688 require.Equal(newIndex, p1.ModifyIndex) 8689 8690 index, err := state.Index("scaling_policy") 8691 require.Equal(newIndex, index) 8692 require.NotEmpty(p1.ID) 8693 8694 // update the job 8695 job.Meta["new-meta"] = "new-value" 8696 newIndex += 100 8697 err = state.UpsertJob(newIndex, job) 8698 require.NoError(err) 8699 require.False(watchFired(ws), "watch should not have fired") 8700 8701 p2, err := state.ScalingPolicyByTarget(nil, policy.Target) 8702 require.NoError(err) 8703 require.NotNil(p2) 8704 require.Equal(p1.ID, p2.ID, "ID should not have changed") 8705 require.Equal(p1.CreateIndex, p2.CreateIndex) 8706 require.Equal(p1.ModifyIndex, p2.ModifyIndex) 8707 8708 index, err = state.Index("scaling_policy") 8709 require.Equal(index, p1.CreateIndex, "table index should not have changed") 8710 } 8711 8712 // Updating the scaling policy for a job should update the index table and fire the watch. 
8713 // This test is the converse of TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex 8714 func TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { 8715 t.Parallel() 8716 8717 require := require.New(t) 8718 8719 state := testStateStore(t) 8720 job, policy := mock.JobWithScalingPolicy() 8721 8722 var oldIndex uint64 = 1000 8723 require.NoError(state.UpsertJob(oldIndex, job)) 8724 8725 ws := memdb.NewWatchSet() 8726 p1, err := state.ScalingPolicyByTarget(ws, policy.Target) 8727 require.NoError(err) 8728 require.NotNil(p1) 8729 require.Equal(oldIndex, p1.CreateIndex) 8730 require.Equal(oldIndex, p1.ModifyIndex) 8731 prevId := p1.ID 8732 8733 index, err := state.Index("scaling_policy") 8734 require.Equal(oldIndex, index) 8735 require.NotEmpty(p1.ID) 8736 8737 // update the job with the updated scaling policy; make sure to use a different object 8738 newPolicy := structs.CopyScalingPolicy(p1) 8739 newPolicy.Policy["new-field"] = "new-value" 8740 job.TaskGroups[0].Scaling = newPolicy 8741 require.NoError(state.UpsertJob(oldIndex+100, job)) 8742 require.True(watchFired(ws), "watch should have fired") 8743 8744 p2, err := state.ScalingPolicyByTarget(nil, policy.Target) 8745 require.NoError(err) 8746 require.NotNil(p2) 8747 require.Equal(p2.Policy["new-field"], "new-value") 8748 require.Equal(prevId, p2.ID, "ID should not have changed") 8749 require.Equal(oldIndex, p2.CreateIndex) 8750 require.Greater(p2.ModifyIndex, oldIndex, "ModifyIndex should have advanced") 8751 8752 index, err = state.Index("scaling_policy") 8753 require.Greater(index, oldIndex, "table index should have advanced") 8754 } 8755 8756 func TestStateStore_DeleteScalingPolicies(t *testing.T) { 8757 t.Parallel() 8758 8759 require := require.New(t) 8760 8761 state := testStateStore(t) 8762 policy := mock.ScalingPolicy() 8763 policy2 := mock.ScalingPolicy() 8764 8765 // Create the policy 8766 err := state.UpsertScalingPolicies(1000, []*structs.ScalingPolicy{policy, policy2}) 8767 
require.NoError(err) 8768 8769 // Create a watcher 8770 ws := memdb.NewWatchSet() 8771 _, err = state.ScalingPolicyByTarget(ws, policy.Target) 8772 require.NoError(err) 8773 8774 // Delete the policy 8775 err = state.DeleteScalingPolicies(1001, []string{policy.ID, policy2.ID}) 8776 require.NoError(err) 8777 8778 // Ensure watching triggered 8779 require.True(watchFired(ws)) 8780 8781 // Ensure we don't get the objects back 8782 ws = memdb.NewWatchSet() 8783 out, err := state.ScalingPolicyByTarget(ws, policy.Target) 8784 require.NoError(err) 8785 require.Nil(out) 8786 8787 ws = memdb.NewWatchSet() 8788 out, err = state.ScalingPolicyByTarget(ws, policy2.Target) 8789 require.NoError(err) 8790 require.Nil(out) 8791 8792 // Ensure we see both policies 8793 iter, err := state.ScalingPoliciesByNamespace(ws, policy.Target[structs.ScalingTargetNamespace]) 8794 require.NoError(err) 8795 count := 0 8796 for { 8797 raw := iter.Next() 8798 if raw == nil { 8799 break 8800 } 8801 count++ 8802 } 8803 require.Equal(0, count) 8804 8805 index, err := state.Index("scaling_policy") 8806 require.NoError(err) 8807 require.True(1001 == index) 8808 require.False(watchFired(ws)) 8809 } 8810 8811 func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) { 8812 t.Parallel() 8813 8814 require := require.New(t) 8815 8816 state := testStateStore(t) 8817 8818 job := mock.Job() 8819 8820 err := state.UpsertJob(1000, job) 8821 require.NoError(err) 8822 8823 policy := mock.ScalingPolicy() 8824 policy.Target[structs.ScalingTargetJob] = job.ID 8825 err = state.UpsertScalingPolicies(1100, []*structs.ScalingPolicy{policy}) 8826 require.NoError(err) 8827 8828 // Ensure the scaling policy is present and start some watches 8829 wsGet := memdb.NewWatchSet() 8830 out, err := state.ScalingPolicyByTarget(wsGet, policy.Target) 8831 require.NoError(err) 8832 require.NotNil(out) 8833 wsList := memdb.NewWatchSet() 8834 _, err = state.ScalingPolicies(wsList) 8835 require.NoError(err) 8836 8837 // Stop the job 
8838 job, err = state.JobByID(nil, job.Namespace, job.ID) 8839 require.NoError(err) 8840 job.Stop = true 8841 err = state.UpsertJob(1200, job) 8842 require.NoError(err) 8843 8844 // Ensure: 8845 // * the scaling policy was deleted 8846 // * the watches were fired 8847 // * the table index was advanced 8848 require.True(watchFired(wsGet)) 8849 require.True(watchFired(wsList)) 8850 out, err = state.ScalingPolicyByTarget(nil, policy.Target) 8851 require.NoError(err) 8852 require.Nil(out) 8853 index, err := state.Index("scaling_policy") 8854 require.GreaterOrEqual(index, uint64(1200)) 8855 } 8856 8857 func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { 8858 t.Parallel() 8859 8860 require := require.New(t) 8861 8862 state := testStateStore(t) 8863 8864 job, policy := mock.JobWithScalingPolicy() 8865 job.Stop = true 8866 8867 // establish watcher, verify there are no scaling policies yet 8868 ws := memdb.NewWatchSet() 8869 list, err := state.ScalingPolicies(ws) 8870 require.NoError(err) 8871 require.Nil(list.Next()) 8872 8873 // upsert a stopped job, verify that we don't fire the watcher or add any scaling policies 8874 err = state.UpsertJob(1000, job) 8875 require.NoError(err) 8876 require.False(watchFired(ws)) 8877 // stopped job should have no scaling policies, watcher doesn't fire 8878 list, err = state.ScalingPolicies(ws) 8879 require.NoError(err) 8880 require.Nil(list.Next()) 8881 8882 // Establish a new watcher 8883 ws = memdb.NewWatchSet() 8884 _, err = state.ScalingPolicies(ws) 8885 require.NoError(err) 8886 // Unstop this job, say you'll run it again... 
8887 job.Stop = false 8888 err = state.UpsertJob(1100, job) 8889 require.NoError(err) 8890 8891 // Ensure the scaling policy was added, watch was fired, index was advanced 8892 require.True(watchFired(ws)) 8893 out, err := state.ScalingPolicyByTarget(nil, policy.Target) 8894 require.NoError(err) 8895 require.NotNil(out) 8896 index, err := state.Index("scaling_policy") 8897 require.GreaterOrEqual(index, uint64(1100)) 8898 } 8899 8900 func TestStateStore_DeleteJob_DeleteScalingPolicies(t *testing.T) { 8901 t.Parallel() 8902 8903 require := require.New(t) 8904 8905 state := testStateStore(t) 8906 8907 job := mock.Job() 8908 8909 err := state.UpsertJob(1000, job) 8910 require.NoError(err) 8911 8912 policy := mock.ScalingPolicy() 8913 policy.Target[structs.ScalingTargetJob] = job.ID 8914 err = state.UpsertScalingPolicies(1001, []*structs.ScalingPolicy{policy}) 8915 require.NoError(err) 8916 8917 // Delete the job 8918 err = state.DeleteJob(1002, job.Namespace, job.ID) 8919 require.NoError(err) 8920 8921 // Ensure the scaling policy was deleted 8922 ws := memdb.NewWatchSet() 8923 out, err := state.ScalingPolicyByTarget(ws, policy.Target) 8924 require.NoError(err) 8925 require.Nil(out) 8926 index, err := state.Index("scaling_policy") 8927 require.True(index > 1001) 8928 } 8929 8930 // This test ensures that deleting a job that doesn't have any scaling policies 8931 // will not cause the scaling_policy table index to increase, on either job 8932 // registration or deletion. 
8933 func TestStateStore_DeleteJob_ScalingPolicyIndexNoop(t *testing.T) { 8934 t.Parallel() 8935 8936 require := require.New(t) 8937 8938 state := testStateStore(t) 8939 8940 job := mock.Job() 8941 8942 prevIndex, err := state.Index("scaling_policy") 8943 require.NoError(err) 8944 8945 err = state.UpsertJob(1000, job) 8946 require.NoError(err) 8947 8948 newIndex, err := state.Index("scaling_policy") 8949 require.NoError(err) 8950 require.Equal(prevIndex, newIndex) 8951 8952 // Delete the job 8953 err = state.DeleteJob(1002, job.Namespace, job.ID) 8954 require.NoError(err) 8955 8956 newIndex, err = state.Index("scaling_policy") 8957 require.NoError(err) 8958 require.Equal(prevIndex, newIndex) 8959 } 8960 8961 func TestStateStore_ScalingPoliciesByJob(t *testing.T) { 8962 t.Parallel() 8963 8964 require := require.New(t) 8965 8966 state := testStateStore(t) 8967 policyA := mock.ScalingPolicy() 8968 policyB1 := mock.ScalingPolicy() 8969 policyB2 := mock.ScalingPolicy() 8970 policyB1.Target[structs.ScalingTargetJob] = policyB2.Target[structs.ScalingTargetJob] 8971 8972 // Create the policies 8973 var baseIndex uint64 = 1000 8974 err := state.UpsertScalingPolicies(baseIndex, []*structs.ScalingPolicy{policyA, policyB1, policyB2}) 8975 require.NoError(err) 8976 8977 iter, err := state.ScalingPoliciesByJob(nil, 8978 policyA.Target[structs.ScalingTargetNamespace], 8979 policyA.Target[structs.ScalingTargetJob]) 8980 require.NoError(err) 8981 8982 // Ensure we see expected policies 8983 count := 0 8984 found := []string{} 8985 for { 8986 raw := iter.Next() 8987 if raw == nil { 8988 break 8989 } 8990 count++ 8991 found = append(found, raw.(*structs.ScalingPolicy).Target[structs.ScalingTargetGroup]) 8992 } 8993 require.Equal(1, count) 8994 sort.Strings(found) 8995 expect := []string{policyA.Target[structs.ScalingTargetGroup]} 8996 sort.Strings(expect) 8997 require.Equal(expect, found) 8998 8999 iter, err = state.ScalingPoliciesByJob(nil, 9000 
policyB1.Target[structs.ScalingTargetNamespace], 9001 policyB1.Target[structs.ScalingTargetJob]) 9002 require.NoError(err) 9003 9004 // Ensure we see expected policies 9005 count = 0 9006 found = []string{} 9007 for { 9008 raw := iter.Next() 9009 if raw == nil { 9010 break 9011 } 9012 count++ 9013 found = append(found, raw.(*structs.ScalingPolicy).Target[structs.ScalingTargetGroup]) 9014 } 9015 require.Equal(2, count) 9016 sort.Strings(found) 9017 expect = []string{ 9018 policyB1.Target[structs.ScalingTargetGroup], 9019 policyB2.Target[structs.ScalingTargetGroup], 9020 } 9021 sort.Strings(expect) 9022 require.Equal(expect, found) 9023 } 9024 9025 func TestStateStore_UpsertScalingEvent(t *testing.T) { 9026 t.Parallel() 9027 require := require.New(t) 9028 9029 state := testStateStore(t) 9030 job := mock.Job() 9031 groupName := job.TaskGroups[0].Name 9032 9033 newEvent := structs.NewScalingEvent("message 1").SetMeta(map[string]interface{}{ 9034 "a": 1, 9035 }) 9036 9037 wsAll := memdb.NewWatchSet() 9038 all, err := state.ScalingEvents(wsAll) 9039 require.NoError(err) 9040 require.Nil(all.Next()) 9041 9042 ws := memdb.NewWatchSet() 9043 out, _, err := state.ScalingEventsByJob(ws, job.Namespace, job.ID) 9044 require.NoError(err) 9045 require.Nil(out) 9046 9047 err = state.UpsertScalingEvent(1000, &structs.ScalingEventRequest{ 9048 Namespace: job.Namespace, 9049 JobID: job.ID, 9050 TaskGroup: groupName, 9051 ScalingEvent: newEvent, 9052 }) 9053 require.NoError(err) 9054 require.True(watchFired(ws)) 9055 require.True(watchFired(wsAll)) 9056 9057 ws = memdb.NewWatchSet() 9058 out, eventsIndex, err := state.ScalingEventsByJob(ws, job.Namespace, job.ID) 9059 require.NoError(err) 9060 require.Equal(map[string][]*structs.ScalingEvent{ 9061 groupName: {newEvent}, 9062 }, out) 9063 require.EqualValues(eventsIndex, 1000) 9064 9065 iter, err := state.ScalingEvents(ws) 9066 require.NoError(err) 9067 9068 count := 0 9069 jobsReturned := []string{} 9070 var jobEvents 
*structs.JobScalingEvents 9071 for { 9072 raw := iter.Next() 9073 if raw == nil { 9074 break 9075 } 9076 jobEvents = raw.(*structs.JobScalingEvents) 9077 jobsReturned = append(jobsReturned, jobEvents.JobID) 9078 count++ 9079 } 9080 require.Equal(1, count) 9081 require.EqualValues(jobEvents.ModifyIndex, 1000) 9082 require.EqualValues(jobEvents.ScalingEvents[groupName][0].CreateIndex, 1000) 9083 9084 index, err := state.Index("scaling_event") 9085 require.NoError(err) 9086 require.ElementsMatch([]string{job.ID}, jobsReturned) 9087 require.Equal(map[string][]*structs.ScalingEvent{ 9088 groupName: {newEvent}, 9089 }, jobEvents.ScalingEvents) 9090 require.EqualValues(1000, index) 9091 require.False(watchFired(ws)) 9092 } 9093 9094 func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { 9095 t.Parallel() 9096 require := require.New(t) 9097 9098 state := testStateStore(t) 9099 namespace := uuid.Generate() 9100 jobID := uuid.Generate() 9101 group1 := uuid.Generate() 9102 group2 := uuid.Generate() 9103 9104 index := uint64(1000) 9105 for i := 1; i <= structs.JobTrackedScalingEvents+10; i++ { 9106 newEvent := structs.NewScalingEvent("").SetMeta(map[string]interface{}{ 9107 "i": i, 9108 "group": group1, 9109 }) 9110 err := state.UpsertScalingEvent(index, &structs.ScalingEventRequest{ 9111 Namespace: namespace, 9112 JobID: jobID, 9113 TaskGroup: group1, 9114 ScalingEvent: newEvent, 9115 }) 9116 index++ 9117 require.NoError(err) 9118 9119 newEvent = structs.NewScalingEvent("").SetMeta(map[string]interface{}{ 9120 "i": i, 9121 "group": group2, 9122 }) 9123 err = state.UpsertScalingEvent(index, &structs.ScalingEventRequest{ 9124 Namespace: namespace, 9125 JobID: jobID, 9126 TaskGroup: group2, 9127 ScalingEvent: newEvent, 9128 }) 9129 index++ 9130 require.NoError(err) 9131 } 9132 9133 out, _, err := state.ScalingEventsByJob(nil, namespace, jobID) 9134 require.NoError(err) 9135 require.Len(out, 2) 9136 9137 expectedEvents := []int{} 9138 for i := 
structs.JobTrackedScalingEvents; i > 0; i-- { 9139 expectedEvents = append(expectedEvents, i+10) 9140 } 9141 9142 // checking order and content 9143 require.Len(out[group1], structs.JobTrackedScalingEvents) 9144 actualEvents := []int{} 9145 for _, event := range out[group1] { 9146 require.Equal(group1, event.Meta["group"]) 9147 actualEvents = append(actualEvents, event.Meta["i"].(int)) 9148 } 9149 require.Equal(expectedEvents, actualEvents) 9150 9151 // checking order and content 9152 require.Len(out[group2], structs.JobTrackedScalingEvents) 9153 actualEvents = []int{} 9154 for _, event := range out[group2] { 9155 require.Equal(group2, event.Meta["group"]) 9156 actualEvents = append(actualEvents, event.Meta["i"].(int)) 9157 } 9158 require.Equal(expectedEvents, actualEvents) 9159 } 9160 9161 func TestStateStore_RestoreScalingEvents(t *testing.T) { 9162 t.Parallel() 9163 require := require.New(t) 9164 9165 state := testStateStore(t) 9166 jobScalingEvents := &structs.JobScalingEvents{ 9167 Namespace: uuid.Generate(), 9168 JobID: uuid.Generate(), 9169 ScalingEvents: map[string][]*structs.ScalingEvent{ 9170 uuid.Generate(): { 9171 structs.NewScalingEvent(uuid.Generate()), 9172 }, 9173 }, 9174 } 9175 9176 restore, err := state.Restore() 9177 require.NoError(err) 9178 9179 err = restore.ScalingEventsRestore(jobScalingEvents) 9180 require.NoError(err) 9181 restore.Commit() 9182 9183 ws := memdb.NewWatchSet() 9184 out, _, err := state.ScalingEventsByJob(ws, jobScalingEvents.Namespace, 9185 jobScalingEvents.JobID) 9186 require.NoError(err) 9187 require.NotNil(out) 9188 require.EqualValues(jobScalingEvents.ScalingEvents, out) 9189 } 9190 9191 func TestStateStore_Abandon(t *testing.T) { 9192 t.Parallel() 9193 9194 s := testStateStore(t) 9195 abandonCh := s.AbandonCh() 9196 s.Abandon() 9197 select { 9198 case <-abandonCh: 9199 default: 9200 t.Fatalf("bad") 9201 } 9202 } 9203 9204 // Verifies that an error is returned when an allocation doesn't exist in the state store. 
9205 func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testing.T) { 9206 t.Parallel() 9207 9208 state := testStateStore(t) 9209 alloc := mock.Alloc() 9210 require := require.New(t) 9211 9212 // Insert job 9213 err := state.UpsertJob(999, alloc.Job) 9214 require.NoError(err) 9215 9216 allocDiffs := []*structs.AllocationDiff{ 9217 { 9218 ID: alloc.ID, 9219 }, 9220 } 9221 9222 snap, err := state.Snapshot() 9223 require.NoError(err) 9224 9225 denormalizedAllocs, err := snap.DenormalizeAllocationDiffSlice(allocDiffs) 9226 9227 require.EqualError(err, fmt.Sprintf("alloc %v doesn't exist", alloc.ID)) 9228 require.Nil(denormalizedAllocs) 9229 } 9230 9231 // TestStateStore_SnapshotMinIndex_OK asserts StateStore.SnapshotMinIndex blocks 9232 // until the StateStore's latest index is >= the requested index. 9233 func TestStateStore_SnapshotMinIndex_OK(t *testing.T) { 9234 t.Parallel() 9235 9236 s := testStateStore(t) 9237 index, err := s.LatestIndex() 9238 require.NoError(t, err) 9239 9240 node := mock.Node() 9241 require.NoError(t, s.UpsertNode(index+1, node)) 9242 9243 // Assert SnapshotMinIndex returns immediately if index < latest index 9244 ctx, cancel := context.WithTimeout(context.Background(), 0) 9245 snap, err := s.SnapshotMinIndex(ctx, index) 9246 cancel() 9247 require.NoError(t, err) 9248 9249 snapIndex, err := snap.LatestIndex() 9250 require.NoError(t, err) 9251 if snapIndex <= index { 9252 require.Fail(t, "snapshot index should be greater than index") 9253 } 9254 9255 // Assert SnapshotMinIndex returns immediately if index == latest index 9256 ctx, cancel = context.WithTimeout(context.Background(), 0) 9257 snap, err = s.SnapshotMinIndex(ctx, index+1) 9258 cancel() 9259 require.NoError(t, err) 9260 9261 snapIndex, err = snap.LatestIndex() 9262 require.NoError(t, err) 9263 require.Equal(t, snapIndex, index+1) 9264 9265 // Assert SnapshotMinIndex blocks if index > latest index 9266 errCh := make(chan error, 1) 9267 ctx, cancel = 
context.WithTimeout(context.Background(), 10*time.Second) 9268 defer cancel() 9269 go func() { 9270 defer close(errCh) 9271 waitIndex := index + 2 9272 snap, err := s.SnapshotMinIndex(ctx, waitIndex) 9273 if err != nil { 9274 errCh <- err 9275 return 9276 } 9277 9278 snapIndex, err := snap.LatestIndex() 9279 if err != nil { 9280 errCh <- err 9281 return 9282 } 9283 9284 if snapIndex < waitIndex { 9285 errCh <- fmt.Errorf("snapshot index < wait index: %d < %d", snapIndex, waitIndex) 9286 return 9287 } 9288 }() 9289 9290 select { 9291 case err := <-errCh: 9292 require.NoError(t, err) 9293 case <-time.After(500 * time.Millisecond): 9294 // Let it block for a bit before unblocking by upserting 9295 } 9296 9297 node.Name = "hal" 9298 require.NoError(t, s.UpsertNode(index+2, node)) 9299 9300 select { 9301 case err := <-errCh: 9302 require.NoError(t, err) 9303 case <-time.After(5 * time.Second): 9304 require.Fail(t, "timed out waiting for SnapshotMinIndex to unblock") 9305 } 9306 } 9307 9308 // TestStateStore_SnapshotMinIndex_Timeout asserts StateStore.SnapshotMinIndex 9309 // returns an error if the desired index is not reached within the deadline. 9310 func TestStateStore_SnapshotMinIndex_Timeout(t *testing.T) { 9311 t.Parallel() 9312 9313 s := testStateStore(t) 9314 index, err := s.LatestIndex() 9315 require.NoError(t, err) 9316 9317 // Assert SnapshotMinIndex blocks if index > latest index 9318 ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) 9319 defer cancel() 9320 snap, err := s.SnapshotMinIndex(ctx, index+1) 9321 require.EqualError(t, err, context.DeadlineExceeded.Error()) 9322 require.Nil(t, snap) 9323 } 9324 9325 // watchFired is a helper for unit tests that returns if the given watch set 9326 // fired (it doesn't care which watch actually fired). This uses a fixed 9327 // timeout since we already expect the event happened before calling this and 9328 // just need to distinguish a fire from a timeout. 
We do need a little time to 9329 // allow the watch to set up any goroutines, though. 9330 func watchFired(ws memdb.WatchSet) bool { 9331 timedOut := ws.Watch(time.After(50 * time.Millisecond)) 9332 return !timedOut 9333 } 9334 9335 // NodeIDSort is used to sort nodes by ID 9336 type NodeIDSort []*structs.Node 9337 9338 func (n NodeIDSort) Len() int { 9339 return len(n) 9340 } 9341 9342 func (n NodeIDSort) Less(i, j int) bool { 9343 return n[i].ID < n[j].ID 9344 } 9345 9346 func (n NodeIDSort) Swap(i, j int) { 9347 n[i], n[j] = n[j], n[i] 9348 } 9349 9350 // JobIDis used to sort jobs by id 9351 type JobIDSort []*structs.Job 9352 9353 func (n JobIDSort) Len() int { 9354 return len(n) 9355 } 9356 9357 func (n JobIDSort) Less(i, j int) bool { 9358 return n[i].ID < n[j].ID 9359 } 9360 9361 func (n JobIDSort) Swap(i, j int) { 9362 n[i], n[j] = n[j], n[i] 9363 } 9364 9365 // EvalIDis used to sort evals by id 9366 type EvalIDSort []*structs.Evaluation 9367 9368 func (n EvalIDSort) Len() int { 9369 return len(n) 9370 } 9371 9372 func (n EvalIDSort) Less(i, j int) bool { 9373 return n[i].ID < n[j].ID 9374 } 9375 9376 func (n EvalIDSort) Swap(i, j int) { 9377 n[i], n[j] = n[j], n[i] 9378 } 9379 9380 // AllocIDsort used to sort allocations by id 9381 type AllocIDSort []*structs.Allocation 9382 9383 func (n AllocIDSort) Len() int { 9384 return len(n) 9385 } 9386 9387 func (n AllocIDSort) Less(i, j int) bool { 9388 return n[i].ID < n[j].ID 9389 } 9390 9391 func (n AllocIDSort) Swap(i, j int) { 9392 n[i], n[j] = n[j], n[i] 9393 }