github.com/hashicorp/nomad/api@v0.0.0-20240306165712-3193ac204f65/jobs_test.go

     1  // Copyright (c) HashiCorp, Inc.
     2  // SPDX-License-Identifier: MPL-2.0
     3  
     4  package api
     5  
     6  import (
     7  	"fmt"
     8  	"sort"
     9  	"testing"
    10  	"time"
    11  
    12  	"github.com/shoenig/test/must"
    13  	"github.com/shoenig/test/wait"
    14  
    15  	"github.com/hashicorp/nomad/api/internal/testutil"
    16  )
    17  
    18  func TestJobs_Register(t *testing.T) {
    19  	testutil.Parallel(t)
    20  
    21  	c, s := makeClient(t, nil, nil)
    22  	defer s.Stop()
    23  	jobs := c.Jobs()
    24  
    25  	// Listing jobs before registering returns nothing
    26  	resp, _, err := jobs.List(nil)
    27  	must.NoError(t, err)
    28  	must.SliceEmpty(t, resp)
    29  
    30  	// Create a job and attempt to register it
    31  	job := testJob()
    32  	resp2, wm, err := jobs.Register(job, nil)
    33  	must.NoError(t, err)
    34  	must.NotNil(t, resp2)
    35  	must.UUIDv4(t, resp2.EvalID)
    36  	assertWriteMeta(t, wm)
    37  
    38  	// Query the jobs back out again
    39  	resp, qm, err := jobs.List(nil)
    40  	must.NoError(t, err)
    41  	assertQueryMeta(t, qm)
    42  
    43  	// Check that we got the expected response
    44  	must.Len(t, 1, resp)
    45  	must.Eq(t, *job.ID, resp[0].ID)
    46  }
    47  
    48  func TestJobs_Register_PreserveCounts(t *testing.T) {
    49  	testutil.Parallel(t)
    50  
    51  	c, s := makeClient(t, nil, nil)
    52  	defer s.Stop()
    53  	jobs := c.Jobs()
    54  
    55  	// Listing jobs before registering returns nothing
    56  	resp, _, err := jobs.List(nil)
    57  	must.NoError(t, err)
    58  	must.SliceEmpty(t, resp)
    59  
    60  	// Create a job
    61  	task := NewTask("task", "exec").
    62  		SetConfig("command", "/bin/sleep").
    63  		Require(&Resources{
    64  			CPU:      pointerOf(100),
    65  			MemoryMB: pointerOf(256),
    66  		}).
    67  		SetLogConfig(&LogConfig{
    68  			MaxFiles:      pointerOf(1),
    69  			MaxFileSizeMB: pointerOf(2),
    70  		})
    71  
    72  	group1 := NewTaskGroup("group1", 1).
    73  		AddTask(task).
    74  		RequireDisk(&EphemeralDisk{
    75  			SizeMB: pointerOf(25),
    76  		})
    77  	group2 := NewTaskGroup("group2", 2).
    78  		AddTask(task).
    79  		RequireDisk(&EphemeralDisk{
    80  			SizeMB: pointerOf(25),
    81  		})
    82  
    83  	job := NewBatchJob("job", "redis", "global", 1).
    84  		AddDatacenter("dc1").
    85  		AddTaskGroup(group1).
    86  		AddTaskGroup(group2)
    87  
    88  	// Create a job and register it
    89  	resp2, wm, err := jobs.Register(job, nil)
    90  	must.NoError(t, err)
    91  	must.NotNil(t, resp2)
    92  	must.UUIDv4(t, resp2.EvalID)
    93  	assertWriteMeta(t, wm)
    94  
    95  	// Update the job with new groups to test PreserveCounts
    96  	group1.Count = nil
    97  	group2.Count = pointerOf(0)
    98  	group3 := NewTaskGroup("group3", 3).
    99  		AddTask(task).
   100  		RequireDisk(&EphemeralDisk{
   101  			SizeMB: pointerOf(25),
   102  		})
   103  	job.AddTaskGroup(group3)
   104  
   105  	// Update the job with PreserveCounts = true
   106  	_, _, err = jobs.RegisterOpts(job, &RegisterOptions{
   107  		PreserveCounts: true,
   108  	}, nil)
   109  	must.NoError(t, err)
   110  
   111  	// Query the job scale status
   112  	status, _, err := jobs.ScaleStatus(*job.ID, nil)
   113  	must.NoError(t, err)
   114  	must.Eq(t, 1, status.TaskGroups["group1"].Desired) // present and nil => preserved
   115  	must.Eq(t, 2, status.TaskGroups["group2"].Desired) // present and specified => preserved
   116  	must.Eq(t, 3, status.TaskGroups["group3"].Desired) // new => as specified in job spec
   117  }
   118  
   119  func TestJobs_Register_NoPreserveCounts(t *testing.T) {
   120  	testutil.Parallel(t)
   121  
   122  	c, s := makeClient(t, nil, nil)
   123  	defer s.Stop()
   124  	jobs := c.Jobs()
   125  
   126  	// Listing jobs before registering returns nothing
   127  	resp, _, err := jobs.List(nil)
   128  	must.NoError(t, err)
   129  	must.SliceEmpty(t, resp)
   130  
   131  	// Create a job
   132  	task := NewTask("task", "exec").
   133  		SetConfig("command", "/bin/sleep").
   134  		Require(&Resources{
   135  			CPU:      pointerOf(100),
   136  			MemoryMB: pointerOf(256),
   137  		}).
   138  		SetLogConfig(&LogConfig{
   139  			MaxFiles:      pointerOf(1),
   140  			MaxFileSizeMB: pointerOf(2),
   141  		})
   142  
   143  	group1 := NewTaskGroup("group1", 1).
   144  		AddTask(task).
   145  		RequireDisk(&EphemeralDisk{
   146  			SizeMB: pointerOf(25),
   147  		})
   148  	group2 := NewTaskGroup("group2", 2).
   149  		AddTask(task).
   150  		RequireDisk(&EphemeralDisk{
   151  			SizeMB: pointerOf(25),
   152  		})
   153  
   154  	job := NewBatchJob("job", "redis", "global", 1).
   155  		AddDatacenter("dc1").
   156  		AddTaskGroup(group1).
   157  		AddTaskGroup(group2)
   158  
   159  	// Create a job and register it
   160  	resp2, wm, err := jobs.Register(job, nil)
   161  	must.NoError(t, err)
   162  	must.NotNil(t, resp2)
   163  	must.UUIDv4(t, resp2.EvalID)
   164  	assertWriteMeta(t, wm)
   165  
   166  	// Update the job with new groups to test PreserveCounts
   167  	group1.Count = pointerOf(0)
   168  	group2.Count = nil
   169  	group3 := NewTaskGroup("group3", 3).
   170  		AddTask(task).
   171  		RequireDisk(&EphemeralDisk{
   172  			SizeMB: pointerOf(25),
   173  		})
   174  	job.AddTaskGroup(group3)
   175  
   176  	// Update the job with PreserveCounts left at the default (false)
   177  	_, _, err = jobs.Register(job, nil)
   178  	must.NoError(t, err)
   179  
   180  	// Query the job scale status
   181  	status, _, err := jobs.ScaleStatus(*job.ID, nil)
   182  	must.NoError(t, err)
   183  	must.Eq(t, "default", status.Namespace)
   184  	must.Eq(t, 0, status.TaskGroups["group1"].Desired) // present => as specified
   185  	must.Eq(t, 1, status.TaskGroups["group2"].Desired) // nil     => default (1)
   186  	must.Eq(t, 3, status.TaskGroups["group3"].Desired) // new     => as specified
   187  }
   188  
   189  func TestJobs_Register_EvalPriority(t *testing.T) {
   190  	testutil.Parallel(t)
   191  
   192  	c, s := makeClient(t, nil, nil)
   193  	defer s.Stop()
   194  
   195  	// Listing jobs before registering returns nothing
   196  	listResp, _, err := c.Jobs().List(nil)
   197  	must.NoError(t, err)
   198  	must.Len(t, 0, listResp)
   199  
   200  	// Create a job and register it with an eval priority.
   201  	job := testJob()
   202  	registerResp, wm, err := c.Jobs().RegisterOpts(job, &RegisterOptions{EvalPriority: 99}, nil)
   203  	must.NoError(t, err)
   204  	must.NotNil(t, registerResp)
   205  	must.UUIDv4(t, registerResp.EvalID)
   206  	assertWriteMeta(t, wm)
   207  
   208  	// Check the created job evaluation has a priority that matches our desired
   209  	// value.
   210  	evalInfo, _, err := c.Evaluations().Info(registerResp.EvalID, nil)
   211  	must.NoError(t, err)
   212  	must.Eq(t, 99, evalInfo.Priority)
   213  }
   214  
   215  func TestJobs_Register_NoEvalPriority(t *testing.T) {
   216  	testutil.Parallel(t)
   217  
   218  	c, s := makeClient(t, nil, nil)
   219  	defer s.Stop()
   220  
   221  	// Listing jobs before registering returns nothing
   222  	listResp, _, err := c.Jobs().List(nil)
   223  	must.NoError(t, err)
   224  	must.Len(t, 0, listResp)
   225  
   226  	// Create a job and register it without an eval priority.
   227  	job := testJob()
   228  	registerResp, wm, err := c.Jobs().RegisterOpts(job, nil, nil)
   229  	must.NoError(t, err)
   230  	must.NotNil(t, registerResp)
   231  	must.UUIDv4(t, registerResp.EvalID)
   232  	assertWriteMeta(t, wm)
   233  
   234  	// Check the created job evaluation has a priority that matches the job
   235  	// priority.
   236  	evalInfo, _, err := c.Evaluations().Info(registerResp.EvalID, nil)
   237  	must.NoError(t, err)
   238  	must.Eq(t, *job.Priority, evalInfo.Priority)
   239  }
   240  
   241  func TestJobs_Validate(t *testing.T) {
   242  	testutil.Parallel(t)
   243  
   244  	c, s := makeClient(t, nil, nil)
   245  	defer s.Stop()
   246  	jobs := c.Jobs()
   247  
   248  	// Create a job and attempt to register it
   249  	job := testJob()
   250  	resp, _, err := jobs.Validate(job, nil)
   251  	must.NoError(t, err)
   252  	must.SliceEmpty(t, resp.ValidationErrors)
   253  
   254  	job.ID = nil
   255  	resp1, _, err := jobs.Validate(job, nil)
   256  	must.NoError(t, err)
   257  	must.Positive(t, len(resp1.ValidationErrors))
   258  }
   259  
   260  func TestJobs_Canonicalize(t *testing.T) {
   261  	testutil.Parallel(t)
   262  
   263  	testCases := []struct {
   264  		name     string
   265  		expected *Job
   266  		input    *Job
   267  	}{
   268  		{
   269  			name: "empty",
   270  			input: &Job{
   271  				TaskGroups: []*TaskGroup{
   272  					{
   273  						Tasks: []*Task{
   274  							{},
   275  						},
   276  					},
   277  				},
   278  			},
   279  			expected: &Job{
   280  				ID:                pointerOf(""),
   281  				Name:              pointerOf(""),
   282  				Region:            pointerOf("global"),
   283  				Namespace:         pointerOf(DefaultNamespace),
   284  				Type:              pointerOf("service"),
   285  				ParentID:          pointerOf(""),
   286  				Priority:          pointerOf(JobDefaultPriority),
   287  				NodePool:          pointerOf(""),
   288  				AllAtOnce:         pointerOf(false),
   289  				ConsulToken:       pointerOf(""),
   290  				ConsulNamespace:   pointerOf(""),
   291  				VaultToken:        pointerOf(""),
   292  				VaultNamespace:    pointerOf(""),
   293  				NomadTokenID:      pointerOf(""),
   294  				Status:            pointerOf(""),
   295  				StatusDescription: pointerOf(""),
   296  				Stop:              pointerOf(false),
   297  				Stable:            pointerOf(false),
   298  				Version:           pointerOf(uint64(0)),
   299  				CreateIndex:       pointerOf(uint64(0)),
   300  				ModifyIndex:       pointerOf(uint64(0)),
   301  				JobModifyIndex:    pointerOf(uint64(0)),
   302  				Update: &UpdateStrategy{
   303  					Stagger:          pointerOf(30 * time.Second),
   304  					MaxParallel:      pointerOf(1),
   305  					HealthCheck:      pointerOf("checks"),
   306  					MinHealthyTime:   pointerOf(10 * time.Second),
   307  					HealthyDeadline:  pointerOf(5 * time.Minute),
   308  					ProgressDeadline: pointerOf(10 * time.Minute),
   309  					AutoRevert:       pointerOf(false),
   310  					Canary:           pointerOf(0),
   311  					AutoPromote:      pointerOf(false),
   312  				},
   313  				TaskGroups: []*TaskGroup{
   314  					{
   315  						Name:                    pointerOf(""),
   316  						Count:                   pointerOf(1),
   317  						PreventRescheduleOnLost: pointerOf(false),
   318  						EphemeralDisk: &EphemeralDisk{
   319  							Sticky:  pointerOf(false),
   320  							Migrate: pointerOf(false),
   321  							SizeMB:  pointerOf(300),
   322  						},
   323  						RestartPolicy: &RestartPolicy{
   324  							Delay:           pointerOf(15 * time.Second),
   325  							Attempts:        pointerOf(2),
   326  							Interval:        pointerOf(30 * time.Minute),
   327  							Mode:            pointerOf("fail"),
   328  							RenderTemplates: pointerOf(false),
   329  						},
   330  						ReschedulePolicy: &ReschedulePolicy{
   331  							Attempts:      pointerOf(0),
   332  							Interval:      pointerOf(time.Duration(0)),
   333  							DelayFunction: pointerOf("exponential"),
   334  							Delay:         pointerOf(30 * time.Second),
   335  							MaxDelay:      pointerOf(1 * time.Hour),
   336  							Unlimited:     pointerOf(true),
   337  						},
   338  						Consul: &Consul{
   339  							Namespace: "",
   340  							Cluster:   "default",
   341  						},
   342  						Update: &UpdateStrategy{
   343  							Stagger:          pointerOf(30 * time.Second),
   344  							MaxParallel:      pointerOf(1),
   345  							HealthCheck:      pointerOf("checks"),
   346  							MinHealthyTime:   pointerOf(10 * time.Second),
   347  							HealthyDeadline:  pointerOf(5 * time.Minute),
   348  							ProgressDeadline: pointerOf(10 * time.Minute),
   349  							AutoRevert:       pointerOf(false),
   350  							Canary:           pointerOf(0),
   351  							AutoPromote:      pointerOf(false),
   352  						},
   353  						Migrate: DefaultMigrateStrategy(),
   354  						Tasks: []*Task{
   355  							{
   356  								KillTimeout:   pointerOf(5 * time.Second),
   357  								LogConfig:     DefaultLogConfig(),
   358  								Resources:     DefaultResources(),
   359  								RestartPolicy: defaultServiceJobRestartPolicy(),
   360  							},
   361  						},
   362  					},
   363  				},
   364  			},
   365  		},
   366  		{
   367  			name: "batch",
   368  			input: &Job{
   369  				Type: pointerOf("batch"),
   370  				TaskGroups: []*TaskGroup{
   371  					{
   372  						Tasks: []*Task{
   373  							{},
   374  						},
   375  					},
   376  				},
   377  			},
   378  			expected: &Job{
   379  				ID:                pointerOf(""),
   380  				Name:              pointerOf(""),
   381  				Region:            pointerOf("global"),
   382  				Namespace:         pointerOf(DefaultNamespace),
   383  				Type:              pointerOf("batch"),
   384  				ParentID:          pointerOf(""),
   385  				Priority:          pointerOf(JobDefaultPriority),
   386  				NodePool:          pointerOf(""),
   387  				AllAtOnce:         pointerOf(false),
   388  				ConsulToken:       pointerOf(""),
   389  				ConsulNamespace:   pointerOf(""),
   390  				VaultToken:        pointerOf(""),
   391  				VaultNamespace:    pointerOf(""),
   392  				NomadTokenID:      pointerOf(""),
   393  				Status:            pointerOf(""),
   394  				StatusDescription: pointerOf(""),
   395  				Stop:              pointerOf(false),
   396  				Stable:            pointerOf(false),
   397  				Version:           pointerOf(uint64(0)),
   398  				CreateIndex:       pointerOf(uint64(0)),
   399  				ModifyIndex:       pointerOf(uint64(0)),
   400  				JobModifyIndex:    pointerOf(uint64(0)),
   401  				TaskGroups: []*TaskGroup{
   402  					{
   403  						Name:                    pointerOf(""),
   404  						Count:                   pointerOf(1),
   405  						PreventRescheduleOnLost: pointerOf(false),
   406  						EphemeralDisk: &EphemeralDisk{
   407  							Sticky:  pointerOf(false),
   408  							Migrate: pointerOf(false),
   409  							SizeMB:  pointerOf(300),
   410  						},
   411  						RestartPolicy: &RestartPolicy{
   412  							Delay:           pointerOf(15 * time.Second),
   413  							Attempts:        pointerOf(3),
   414  							Interval:        pointerOf(24 * time.Hour),
   415  							Mode:            pointerOf("fail"),
   416  							RenderTemplates: pointerOf(false),
   417  						},
   418  						ReschedulePolicy: &ReschedulePolicy{
   419  							Attempts:      pointerOf(1),
   420  							Interval:      pointerOf(24 * time.Hour),
   421  							DelayFunction: pointerOf("constant"),
   422  							Delay:         pointerOf(5 * time.Second),
   423  							MaxDelay:      pointerOf(time.Duration(0)),
   424  							Unlimited:     pointerOf(false),
   425  						},
   426  						Consul: &Consul{
   427  							Namespace: "",
   428  							Cluster:   "default",
   429  						},
   430  						Tasks: []*Task{
   431  							{
   432  								KillTimeout:   pointerOf(5 * time.Second),
   433  								LogConfig:     DefaultLogConfig(),
   434  								Resources:     DefaultResources(),
   435  								RestartPolicy: defaultBatchJobRestartPolicy(),
   436  							},
   437  						},
   438  					},
   439  				},
   440  			},
   441  		},
   442  		{
   443  			name: "partial",
   444  			input: &Job{
   445  				Name:      pointerOf("foo"),
   446  				Namespace: pointerOf("bar"),
   447  				ID:        pointerOf("bar"),
   448  				ParentID:  pointerOf("lol"),
   449  				TaskGroups: []*TaskGroup{
   450  					{
   451  						Name: pointerOf("bar"),
   452  						Tasks: []*Task{
   453  							{
   454  								Name: "task1",
   455  							},
   456  						},
   457  					},
   458  				},
   459  			},
   460  			expected: &Job{
   461  				Namespace:         pointerOf("bar"),
   462  				ID:                pointerOf("bar"),
   463  				Name:              pointerOf("foo"),
   464  				Region:            pointerOf("global"),
   465  				Type:              pointerOf("service"),
   466  				ParentID:          pointerOf("lol"),
   467  				Priority:          pointerOf(JobDefaultPriority),
   468  				NodePool:          pointerOf(""),
   469  				AllAtOnce:         pointerOf(false),
   470  				ConsulToken:       pointerOf(""),
   471  				ConsulNamespace:   pointerOf(""),
   472  				VaultToken:        pointerOf(""),
   473  				VaultNamespace:    pointerOf(""),
   474  				NomadTokenID:      pointerOf(""),
   475  				Stop:              pointerOf(false),
   476  				Stable:            pointerOf(false),
   477  				Version:           pointerOf(uint64(0)),
   478  				Status:            pointerOf(""),
   479  				StatusDescription: pointerOf(""),
   480  				CreateIndex:       pointerOf(uint64(0)),
   481  				ModifyIndex:       pointerOf(uint64(0)),
   482  				JobModifyIndex:    pointerOf(uint64(0)),
   483  				Update: &UpdateStrategy{
   484  					Stagger:          pointerOf(30 * time.Second),
   485  					MaxParallel:      pointerOf(1),
   486  					HealthCheck:      pointerOf("checks"),
   487  					MinHealthyTime:   pointerOf(10 * time.Second),
   488  					HealthyDeadline:  pointerOf(5 * time.Minute),
   489  					ProgressDeadline: pointerOf(10 * time.Minute),
   490  					AutoRevert:       pointerOf(false),
   491  					Canary:           pointerOf(0),
   492  					AutoPromote:      pointerOf(false),
   493  				},
   494  				TaskGroups: []*TaskGroup{
   495  					{
   496  						Name:                    pointerOf("bar"),
   497  						PreventRescheduleOnLost: pointerOf(false),
   498  						Count:                   pointerOf(1),
   499  						EphemeralDisk: &EphemeralDisk{
   500  							Sticky:  pointerOf(false),
   501  							Migrate: pointerOf(false),
   502  							SizeMB:  pointerOf(300),
   503  						},
   504  						RestartPolicy: &RestartPolicy{
   505  							Delay:           pointerOf(15 * time.Second),
   506  							Attempts:        pointerOf(2),
   507  							Interval:        pointerOf(30 * time.Minute),
   508  							Mode:            pointerOf("fail"),
   509  							RenderTemplates: pointerOf(false),
   510  						},
   511  						ReschedulePolicy: &ReschedulePolicy{
   512  							Attempts:      pointerOf(0),
   513  							Interval:      pointerOf(time.Duration(0)),
   514  							DelayFunction: pointerOf("exponential"),
   515  							Delay:         pointerOf(30 * time.Second),
   516  							MaxDelay:      pointerOf(1 * time.Hour),
   517  							Unlimited:     pointerOf(true),
   518  						},
   519  						Consul: &Consul{
   520  							Namespace: "",
   521  							Cluster:   "default",
   522  						},
   523  						Update: &UpdateStrategy{
   524  							Stagger:          pointerOf(30 * time.Second),
   525  							MaxParallel:      pointerOf(1),
   526  							HealthCheck:      pointerOf("checks"),
   527  							MinHealthyTime:   pointerOf(10 * time.Second),
   528  							HealthyDeadline:  pointerOf(5 * time.Minute),
   529  							ProgressDeadline: pointerOf(10 * time.Minute),
   530  							AutoRevert:       pointerOf(false),
   531  							Canary:           pointerOf(0),
   532  							AutoPromote:      pointerOf(false),
   533  						},
   534  						Migrate: DefaultMigrateStrategy(),
   535  						Tasks: []*Task{
   536  							{
   537  								Name:          "task1",
   538  								LogConfig:     DefaultLogConfig(),
   539  								Resources:     DefaultResources(),
   540  								KillTimeout:   pointerOf(5 * time.Second),
   541  								RestartPolicy: defaultServiceJobRestartPolicy(),
   542  							},
   543  						},
   544  					},
   545  				},
   546  			},
   547  		},
   548  		{
   549  			name: "example_template",
   550  			input: &Job{
   551  				ID:          pointerOf("example_template"),
   552  				Name:        pointerOf("example_template"),
   553  				Datacenters: []string{"dc1"},
   554  				Type:        pointerOf("service"),
   555  				Update: &UpdateStrategy{
   556  					MaxParallel: pointerOf(1),
   557  					AutoPromote: pointerOf(true),
   558  				},
   559  				TaskGroups: []*TaskGroup{
   560  					{
   561  						Name:                    pointerOf("cache"),
   562  						Count:                   pointerOf(1),
   563  						PreventRescheduleOnLost: pointerOf(true),
   564  						RestartPolicy: &RestartPolicy{
   565  							Interval: pointerOf(5 * time.Minute),
   566  							Attempts: pointerOf(10),
   567  							Delay:    pointerOf(25 * time.Second),
   568  							Mode:     pointerOf("delay"),
   569  						},
   570  						Update: &UpdateStrategy{
   571  							AutoRevert: pointerOf(true),
   572  						},
   573  						EphemeralDisk: &EphemeralDisk{
   574  							SizeMB: pointerOf(300),
   575  						},
   576  						Tasks: []*Task{
   577  							{
   578  								Name:   "redis",
   579  								Driver: "docker",
   580  								Config: map[string]interface{}{
   581  									"image": "redis:7",
   582  									"port_map": []map[string]int{{
   583  										"db": 6379,
   584  									}},
   585  								},
   586  								RestartPolicy: &RestartPolicy{
   587  									// inherit other values from TG
   588  									Attempts: pointerOf(20),
   589  								},
   590  								Resources: &Resources{
   591  									CPU:      pointerOf(500),
   592  									MemoryMB: pointerOf(256),
   593  									Networks: []*NetworkResource{
   594  										{
   595  											MBits: pointerOf(10),
   596  											DynamicPorts: []Port{
   597  												{
   598  													Label: "db",
   599  												},
   600  											},
   601  										},
   602  									},
   603  								},
   604  								Services: []*Service{
   605  									{
   606  										Name:       "redis-cache",
   607  										Tags:       []string{"global", "cache"},
   608  										CanaryTags: []string{"canary", "global", "cache"},
   609  										PortLabel:  "db",
   610  										Checks: []ServiceCheck{
   611  											{
   612  												Name:     "alive",
   613  												Type:     "tcp",
   614  												Interval: 10 * time.Second,
   615  												Timeout:  2 * time.Second,
   616  											},
   617  										},
   618  									},
   619  								},
   620  								Templates: []*Template{
   621  									{
   622  										EmbeddedTmpl: pointerOf("---"),
   623  										DestPath:     pointerOf("local/file.yml"),
   624  									},
   625  									{
   626  										EmbeddedTmpl: pointerOf("FOO=bar\n"),
   627  										DestPath:     pointerOf("local/file.env"),
   628  										Envvars:      pointerOf(true),
   629  									},
   630  								},
   631  							},
   632  						},
   633  					},
   634  				},
   635  			},
   636  			expected: &Job{
   637  				Namespace:         pointerOf(DefaultNamespace),
   638  				ID:                pointerOf("example_template"),
   639  				Name:              pointerOf("example_template"),
   640  				ParentID:          pointerOf(""),
   641  				Priority:          pointerOf(JobDefaultPriority),
   642  				NodePool:          pointerOf(""),
   643  				Region:            pointerOf("global"),
   644  				Type:              pointerOf("service"),
   645  				AllAtOnce:         pointerOf(false),
   646  				ConsulToken:       pointerOf(""),
   647  				ConsulNamespace:   pointerOf(""),
   648  				VaultToken:        pointerOf(""),
   649  				VaultNamespace:    pointerOf(""),
   650  				NomadTokenID:      pointerOf(""),
   651  				Stop:              pointerOf(false),
   652  				Stable:            pointerOf(false),
   653  				Version:           pointerOf(uint64(0)),
   654  				Status:            pointerOf(""),
   655  				StatusDescription: pointerOf(""),
   656  				CreateIndex:       pointerOf(uint64(0)),
   657  				ModifyIndex:       pointerOf(uint64(0)),
   658  				JobModifyIndex:    pointerOf(uint64(0)),
   659  				Datacenters:       []string{"dc1"},
   660  				Update: &UpdateStrategy{
   661  					Stagger:          pointerOf(30 * time.Second),
   662  					MaxParallel:      pointerOf(1),
   663  					HealthCheck:      pointerOf("checks"),
   664  					MinHealthyTime:   pointerOf(10 * time.Second),
   665  					HealthyDeadline:  pointerOf(5 * time.Minute),
   666  					ProgressDeadline: pointerOf(10 * time.Minute),
   667  					AutoRevert:       pointerOf(false),
   668  					Canary:           pointerOf(0),
   669  					AutoPromote:      pointerOf(true),
   670  				},
   671  				TaskGroups: []*TaskGroup{
   672  					{
   673  						Name:                    pointerOf("cache"),
   674  						Count:                   pointerOf(1),
   675  						PreventRescheduleOnLost: pointerOf(true),
   676  						RestartPolicy: &RestartPolicy{
   677  							Interval:        pointerOf(5 * time.Minute),
   678  							Attempts:        pointerOf(10),
   679  							Delay:           pointerOf(25 * time.Second),
   680  							Mode:            pointerOf("delay"),
   681  							RenderTemplates: pointerOf(false),
   682  						},
   683  						ReschedulePolicy: &ReschedulePolicy{
   684  							Attempts:      pointerOf(0),
   685  							Interval:      pointerOf(time.Duration(0)),
   686  							DelayFunction: pointerOf("exponential"),
   687  							Delay:         pointerOf(30 * time.Second),
   688  							MaxDelay:      pointerOf(1 * time.Hour),
   689  							Unlimited:     pointerOf(true),
   690  						},
   691  						EphemeralDisk: &EphemeralDisk{
   692  							Sticky:  pointerOf(false),
   693  							Migrate: pointerOf(false),
   694  							SizeMB:  pointerOf(300),
   695  						},
   696  						Consul: &Consul{
   697  							Namespace: "",
   698  							Cluster:   "default",
   699  						},
   700  						Update: &UpdateStrategy{
   701  							Stagger:          pointerOf(30 * time.Second),
   702  							MaxParallel:      pointerOf(1),
   703  							HealthCheck:      pointerOf("checks"),
   704  							MinHealthyTime:   pointerOf(10 * time.Second),
   705  							HealthyDeadline:  pointerOf(5 * time.Minute),
   706  							ProgressDeadline: pointerOf(10 * time.Minute),
   707  							AutoRevert:       pointerOf(true),
   708  							Canary:           pointerOf(0),
   709  							AutoPromote:      pointerOf(true),
   710  						},
   711  						Migrate: DefaultMigrateStrategy(),
   712  						Tasks: []*Task{
   713  							{
   714  								Name:   "redis",
   715  								Driver: "docker",
   716  								Config: map[string]interface{}{
   717  									"image": "redis:7",
   718  									"port_map": []map[string]int{{
   719  										"db": 6379,
   720  									}},
   721  								},
   722  								RestartPolicy: &RestartPolicy{
   723  									Interval:        pointerOf(5 * time.Minute),
   724  									Attempts:        pointerOf(20),
   725  									Delay:           pointerOf(25 * time.Second),
   726  									Mode:            pointerOf("delay"),
   727  									RenderTemplates: pointerOf(false),
   728  								},
   729  								Resources: &Resources{
   730  									CPU:      pointerOf(500),
   731  									Cores:    pointerOf(0),
   732  									MemoryMB: pointerOf(256),
   733  									Networks: []*NetworkResource{
   734  										{
   735  											MBits: pointerOf(10),
   736  											DynamicPorts: []Port{
   737  												{
   738  													Label: "db",
   739  												},
   740  											},
   741  										},
   742  									},
   743  								},
   744  								Services: []*Service{
   745  									{
   746  										Name:        "redis-cache",
   747  										Tags:        []string{"global", "cache"},
   748  										CanaryTags:  []string{"canary", "global", "cache"},
   749  										PortLabel:   "db",
   750  										AddressMode: "auto",
   751  										OnUpdate:    "require_healthy",
   752  										Provider:    "consul",
   753  										Cluster:     "default",
   754  										Checks: []ServiceCheck{
   755  											{
   756  												Name:     "alive",
   757  												Type:     "tcp",
   758  												Interval: 10 * time.Second,
   759  												Timeout:  2 * time.Second,
   760  												OnUpdate: "require_healthy",
   761  											},
   762  										},
   763  									},
   764  								},
   765  								KillTimeout: pointerOf(5 * time.Second),
   766  								LogConfig:   DefaultLogConfig(),
   767  								Templates: []*Template{
   768  									{
   769  										SourcePath:    pointerOf(""),
   770  										DestPath:      pointerOf("local/file.yml"),
   771  										EmbeddedTmpl:  pointerOf("---"),
   772  										ChangeMode:    pointerOf("restart"),
   773  										ChangeSignal:  pointerOf(""),
   774  										Splay:         pointerOf(5 * time.Second),
   775  										Perms:         pointerOf("0644"),
   776  										LeftDelim:     pointerOf("{{"),
   777  										RightDelim:    pointerOf("}}"),
   778  										Envvars:       pointerOf(false),
   779  										VaultGrace:    pointerOf(time.Duration(0)),
   780  										ErrMissingKey: pointerOf(false),
   781  									},
   782  									{
   783  										SourcePath:    pointerOf(""),
   784  										DestPath:      pointerOf("local/file.env"),
   785  										EmbeddedTmpl:  pointerOf("FOO=bar\n"),
   786  										ChangeMode:    pointerOf("restart"),
   787  										ChangeSignal:  pointerOf(""),
   788  										Splay:         pointerOf(5 * time.Second),
   789  										Perms:         pointerOf("0644"),
   790  										LeftDelim:     pointerOf("{{"),
   791  										RightDelim:    pointerOf("}}"),
   792  										Envvars:       pointerOf(true),
   793  										VaultGrace:    pointerOf(time.Duration(0)),
   794  										ErrMissingKey: pointerOf(false),
   795  									},
   796  								},
   797  							},
   798  						},
   799  					},
   800  				},
   801  			},
   802  		},
   803  		{
   804  			name: "periodic",
   805  			input: &Job{
   806  				ID:       pointerOf("bar"),
   807  				Periodic: &PeriodicConfig{},
   808  			},
   809  			expected: &Job{
   810  				Namespace:         pointerOf(DefaultNamespace),
   811  				ID:                pointerOf("bar"),
   812  				ParentID:          pointerOf(""),
   813  				Name:              pointerOf("bar"),
   814  				Region:            pointerOf("global"),
   815  				Type:              pointerOf("service"),
   816  				Priority:          pointerOf(JobDefaultPriority),
   817  				NodePool:          pointerOf(""),
   818  				AllAtOnce:         pointerOf(false),
   819  				ConsulToken:       pointerOf(""),
   820  				ConsulNamespace:   pointerOf(""),
   821  				VaultToken:        pointerOf(""),
   822  				VaultNamespace:    pointerOf(""),
   823  				NomadTokenID:      pointerOf(""),
   824  				Stop:              pointerOf(false),
   825  				Stable:            pointerOf(false),
   826  				Version:           pointerOf(uint64(0)),
   827  				Status:            pointerOf(""),
   828  				StatusDescription: pointerOf(""),
   829  				CreateIndex:       pointerOf(uint64(0)),
   830  				ModifyIndex:       pointerOf(uint64(0)),
   831  				JobModifyIndex:    pointerOf(uint64(0)),
   832  				Update: &UpdateStrategy{
   833  					Stagger:          pointerOf(30 * time.Second),
   834  					MaxParallel:      pointerOf(1),
   835  					HealthCheck:      pointerOf("checks"),
   836  					MinHealthyTime:   pointerOf(10 * time.Second),
   837  					HealthyDeadline:  pointerOf(5 * time.Minute),
   838  					ProgressDeadline: pointerOf(10 * time.Minute),
   839  					AutoRevert:       pointerOf(false),
   840  					Canary:           pointerOf(0),
   841  					AutoPromote:      pointerOf(false),
   842  				},
   843  				Periodic: &PeriodicConfig{
   844  					Enabled:         pointerOf(true),
   845  					Spec:            pointerOf(""),
   846  					Specs:           []string{},
   847  					SpecType:        pointerOf(PeriodicSpecCron),
   848  					ProhibitOverlap: pointerOf(false),
   849  					TimeZone:        pointerOf("UTC"),
   850  				},
   851  			},
   852  		},
   853  		{
   854  			name: "update_merge",
   855  			input: &Job{
   856  				Name:     pointerOf("foo"),
   857  				ID:       pointerOf("bar"),
   858  				ParentID: pointerOf("lol"),
   859  				Update: &UpdateStrategy{
   860  					Stagger:          pointerOf(1 * time.Second),
   861  					MaxParallel:      pointerOf(1),
   862  					HealthCheck:      pointerOf("checks"),
   863  					MinHealthyTime:   pointerOf(10 * time.Second),
   864  					HealthyDeadline:  pointerOf(6 * time.Minute),
   865  					ProgressDeadline: pointerOf(7 * time.Minute),
   866  					AutoRevert:       pointerOf(false),
   867  					Canary:           pointerOf(0),
   868  					AutoPromote:      pointerOf(false),
   869  				},
   870  				TaskGroups: []*TaskGroup{
   871  					{
   872  						Name:                    pointerOf("bar"),
   873  						PreventRescheduleOnLost: pointerOf(true),
   874  						Consul: &Consul{
   875  							Namespace: "",
   876  						},
   877  						Update: &UpdateStrategy{
   878  							Stagger:        pointerOf(2 * time.Second),
   879  							MaxParallel:    pointerOf(2),
   880  							HealthCheck:    pointerOf("manual"),
   881  							MinHealthyTime: pointerOf(1 * time.Second),
   882  							AutoRevert:     pointerOf(true),
   883  							Canary:         pointerOf(1),
   884  							AutoPromote:    pointerOf(true),
   885  						},
   886  						Tasks: []*Task{
   887  							{
   888  								Name: "task1",
   889  							},
   890  						},
   891  					},
   892  					{
   893  						Name:                    pointerOf("baz"),
   894  						PreventRescheduleOnLost: pointerOf(false),
   895  						Tasks: []*Task{
   896  							{
   897  								Name: "task1",
   898  							},
   899  						},
   900  					},
   901  				},
   902  			},
   903  			expected: &Job{
   904  				Namespace:         pointerOf(DefaultNamespace),
   905  				ID:                pointerOf("bar"),
   906  				Name:              pointerOf("foo"),
   907  				Region:            pointerOf("global"),
   908  				Type:              pointerOf("service"),
   909  				ParentID:          pointerOf("lol"),
   910  				Priority:          pointerOf(JobDefaultPriority),
   911  				NodePool:          pointerOf(""),
   912  				AllAtOnce:         pointerOf(false),
   913  				ConsulToken:       pointerOf(""),
   914  				ConsulNamespace:   pointerOf(""),
   915  				VaultToken:        pointerOf(""),
   916  				VaultNamespace:    pointerOf(""),
   917  				NomadTokenID:      pointerOf(""),
   918  				Stop:              pointerOf(false),
   919  				Stable:            pointerOf(false),
   920  				Version:           pointerOf(uint64(0)),
   921  				Status:            pointerOf(""),
   922  				StatusDescription: pointerOf(""),
   923  				CreateIndex:       pointerOf(uint64(0)),
   924  				ModifyIndex:       pointerOf(uint64(0)),
   925  				JobModifyIndex:    pointerOf(uint64(0)),
   926  				Update: &UpdateStrategy{
   927  					Stagger:          pointerOf(1 * time.Second),
   928  					MaxParallel:      pointerOf(1),
   929  					HealthCheck:      pointerOf("checks"),
   930  					MinHealthyTime:   pointerOf(10 * time.Second),
   931  					HealthyDeadline:  pointerOf(6 * time.Minute),
   932  					ProgressDeadline: pointerOf(7 * time.Minute),
   933  					AutoRevert:       pointerOf(false),
   934  					Canary:           pointerOf(0),
   935  					AutoPromote:      pointerOf(false),
   936  				},
   937  				TaskGroups: []*TaskGroup{
   938  					{
   939  						Name:                    pointerOf("bar"),
   940  						Count:                   pointerOf(1),
   941  						PreventRescheduleOnLost: pointerOf(true),
   942  						EphemeralDisk: &EphemeralDisk{
   943  							Sticky:  pointerOf(false),
   944  							Migrate: pointerOf(false),
   945  							SizeMB:  pointerOf(300),
   946  						},
   947  						RestartPolicy: &RestartPolicy{
   948  							Delay:           pointerOf(15 * time.Second),
   949  							Attempts:        pointerOf(2),
   950  							Interval:        pointerOf(30 * time.Minute),
   951  							Mode:            pointerOf("fail"),
   952  							RenderTemplates: pointerOf(false),
   953  						},
   954  						ReschedulePolicy: &ReschedulePolicy{
   955  							Attempts:      pointerOf(0),
   956  							Interval:      pointerOf(time.Duration(0)),
   957  							DelayFunction: pointerOf("exponential"),
   958  							Delay:         pointerOf(30 * time.Second),
   959  							MaxDelay:      pointerOf(1 * time.Hour),
   960  							Unlimited:     pointerOf(true),
   961  						},
   962  						Consul: &Consul{
   963  							Namespace: "",
   964  							Cluster:   "default",
   965  						},
   966  						Update: &UpdateStrategy{
   967  							Stagger:          pointerOf(2 * time.Second),
   968  							MaxParallel:      pointerOf(2),
   969  							HealthCheck:      pointerOf("manual"),
   970  							MinHealthyTime:   pointerOf(1 * time.Second),
   971  							HealthyDeadline:  pointerOf(6 * time.Minute),
   972  							ProgressDeadline: pointerOf(7 * time.Minute),
   973  							AutoRevert:       pointerOf(true),
   974  							Canary:           pointerOf(1),
   975  							AutoPromote:      pointerOf(true),
   976  						},
   977  						Migrate: DefaultMigrateStrategy(),
   978  						Tasks: []*Task{
   979  							{
   980  								Name:          "task1",
   981  								LogConfig:     DefaultLogConfig(),
   982  								Resources:     DefaultResources(),
   983  								KillTimeout:   pointerOf(5 * time.Second),
   984  								RestartPolicy: defaultServiceJobRestartPolicy(),
   985  							},
   986  						},
   987  					},
   988  					{
   989  						Name:                    pointerOf("baz"),
   990  						PreventRescheduleOnLost: pointerOf(false),
   991  						Count:                   pointerOf(1),
   992  						EphemeralDisk: &EphemeralDisk{
   993  							Sticky:  pointerOf(false),
   994  							Migrate: pointerOf(false),
   995  							SizeMB:  pointerOf(300),
   996  						},
   997  						RestartPolicy: &RestartPolicy{
   998  							Delay:           pointerOf(15 * time.Second),
   999  							Attempts:        pointerOf(2),
  1000  							Interval:        pointerOf(30 * time.Minute),
  1001  							Mode:            pointerOf("fail"),
  1002  							RenderTemplates: pointerOf(false),
  1003  						},
  1004  						ReschedulePolicy: &ReschedulePolicy{
  1005  							Attempts:      pointerOf(0),
  1006  							Interval:      pointerOf(time.Duration(0)),
  1007  							DelayFunction: pointerOf("exponential"),
  1008  							Delay:         pointerOf(30 * time.Second),
  1009  							MaxDelay:      pointerOf(1 * time.Hour),
  1010  							Unlimited:     pointerOf(true),
  1011  						},
  1012  						Consul: &Consul{
  1013  							Namespace: "",
  1014  							Cluster:   "default",
  1015  						},
  1016  						Update: &UpdateStrategy{
  1017  							Stagger:          pointerOf(1 * time.Second),
  1018  							MaxParallel:      pointerOf(1),
  1019  							HealthCheck:      pointerOf("checks"),
  1020  							MinHealthyTime:   pointerOf(10 * time.Second),
  1021  							HealthyDeadline:  pointerOf(6 * time.Minute),
  1022  							ProgressDeadline: pointerOf(7 * time.Minute),
  1023  							AutoRevert:       pointerOf(false),
  1024  							Canary:           pointerOf(0),
  1025  							AutoPromote:      pointerOf(false),
  1026  						},
  1027  						Migrate: DefaultMigrateStrategy(),
  1028  						Tasks: []*Task{
  1029  							{
  1030  								Name:          "task1",
  1031  								LogConfig:     DefaultLogConfig(),
  1032  								Resources:     DefaultResources(),
  1033  								KillTimeout:   pointerOf(5 * time.Second),
  1034  								RestartPolicy: defaultServiceJobRestartPolicy(),
  1035  							},
  1036  						},
  1037  					},
  1038  				},
  1039  			},
  1040  		},
  1041  		{
  1042  			name: "restart_merge",
  1043  			input: &Job{
  1044  				Name:     pointerOf("foo"),
  1045  				ID:       pointerOf("bar"),
  1046  				ParentID: pointerOf("lol"),
  1047  				TaskGroups: []*TaskGroup{
  1048  					{
  1049  						Name:                    pointerOf("bar"),
  1050  						PreventRescheduleOnLost: pointerOf(true),
  1051  						RestartPolicy: &RestartPolicy{
  1052  							Delay:    pointerOf(15 * time.Second),
  1053  							Attempts: pointerOf(2),
  1054  							Interval: pointerOf(30 * time.Minute),
  1055  							Mode:     pointerOf("fail"),
  1056  						},
  1057  						Tasks: []*Task{
  1058  							{
  1059  								Name: "task1",
  1060  								RestartPolicy: &RestartPolicy{
  1061  									Attempts:        pointerOf(5),
  1062  									Delay:           pointerOf(1 * time.Second),
  1063  									RenderTemplates: pointerOf(true),
  1064  								},
  1065  							},
  1066  						},
  1067  					},
  1068  					{
  1069  						Name: pointerOf("baz"),
  1070  						RestartPolicy: &RestartPolicy{
  1071  							Delay:    pointerOf(20 * time.Second),
  1072  							Attempts: pointerOf(2),
  1073  							Interval: pointerOf(30 * time.Minute),
  1074  							Mode:     pointerOf("fail"),
  1075  						},
  1076  						Consul: &Consul{
  1077  							Namespace: "",
  1078  						},
  1079  						Tasks: []*Task{
  1080  							{
  1081  								Name: "task1",
  1082  							},
  1083  						},
  1084  					},
  1085  				},
  1086  			},
  1087  			expected: &Job{
  1088  				Namespace:         pointerOf(DefaultNamespace),
  1089  				ID:                pointerOf("bar"),
  1090  				Name:              pointerOf("foo"),
  1091  				Region:            pointerOf("global"),
  1092  				Type:              pointerOf("service"),
  1093  				ParentID:          pointerOf("lol"),
  1094  				NodePool:          pointerOf(""),
  1095  				Priority:          pointerOf(JobDefaultPriority),
  1096  				AllAtOnce:         pointerOf(false),
  1097  				ConsulToken:       pointerOf(""),
  1098  				ConsulNamespace:   pointerOf(""),
  1099  				VaultToken:        pointerOf(""),
  1100  				VaultNamespace:    pointerOf(""),
  1101  				NomadTokenID:      pointerOf(""),
  1102  				Stop:              pointerOf(false),
  1103  				Stable:            pointerOf(false),
  1104  				Version:           pointerOf(uint64(0)),
  1105  				Status:            pointerOf(""),
  1106  				StatusDescription: pointerOf(""),
  1107  				CreateIndex:       pointerOf(uint64(0)),
  1108  				ModifyIndex:       pointerOf(uint64(0)),
  1109  				JobModifyIndex:    pointerOf(uint64(0)),
  1110  				Update: &UpdateStrategy{
  1111  					Stagger:          pointerOf(30 * time.Second),
  1112  					MaxParallel:      pointerOf(1),
  1113  					HealthCheck:      pointerOf("checks"),
  1114  					MinHealthyTime:   pointerOf(10 * time.Second),
  1115  					HealthyDeadline:  pointerOf(5 * time.Minute),
  1116  					ProgressDeadline: pointerOf(10 * time.Minute),
  1117  					AutoRevert:       pointerOf(false),
  1118  					Canary:           pointerOf(0),
  1119  					AutoPromote:      pointerOf(false),
  1120  				},
  1121  				TaskGroups: []*TaskGroup{
  1122  					{
  1123  						Name:                    pointerOf("bar"),
  1124  						PreventRescheduleOnLost: pointerOf(true),
  1125  						Count:                   pointerOf(1),
  1126  						EphemeralDisk: &EphemeralDisk{
  1127  							Sticky:  pointerOf(false),
  1128  							Migrate: pointerOf(false),
  1129  							SizeMB:  pointerOf(300),
  1130  						},
  1131  						RestartPolicy: &RestartPolicy{
  1132  							Delay:           pointerOf(15 * time.Second),
  1133  							Attempts:        pointerOf(2),
  1134  							Interval:        pointerOf(30 * time.Minute),
  1135  							Mode:            pointerOf("fail"),
  1136  							RenderTemplates: pointerOf(false),
  1137  						},
  1138  						ReschedulePolicy: &ReschedulePolicy{
  1139  							Attempts:      pointerOf(0),
  1140  							Interval:      pointerOf(time.Duration(0)),
  1141  							DelayFunction: pointerOf("exponential"),
  1142  							Delay:         pointerOf(30 * time.Second),
  1143  							MaxDelay:      pointerOf(1 * time.Hour),
  1144  							Unlimited:     pointerOf(true),
  1145  						},
  1146  						Consul: &Consul{
  1147  							Namespace: "",
  1148  							Cluster:   "default",
  1149  						},
  1150  						Update: &UpdateStrategy{
  1151  							Stagger:          pointerOf(30 * time.Second),
  1152  							MaxParallel:      pointerOf(1),
  1153  							HealthCheck:      pointerOf("checks"),
  1154  							MinHealthyTime:   pointerOf(10 * time.Second),
  1155  							HealthyDeadline:  pointerOf(5 * time.Minute),
  1156  							ProgressDeadline: pointerOf(10 * time.Minute),
  1157  							AutoRevert:       pointerOf(false),
  1158  							Canary:           pointerOf(0),
  1159  							AutoPromote:      pointerOf(false),
  1160  						},
  1161  						Migrate: DefaultMigrateStrategy(),
  1162  						Tasks: []*Task{
  1163  							{
  1164  								Name:        "task1",
  1165  								LogConfig:   DefaultLogConfig(),
  1166  								Resources:   DefaultResources(),
  1167  								KillTimeout: pointerOf(5 * time.Second),
  1168  								RestartPolicy: &RestartPolicy{
  1169  									Attempts:        pointerOf(5),
  1170  									Delay:           pointerOf(1 * time.Second),
  1171  									Interval:        pointerOf(30 * time.Minute),
  1172  									Mode:            pointerOf("fail"),
  1173  									RenderTemplates: pointerOf(true),
  1174  								},
  1175  							},
  1176  						},
  1177  					},
  1178  					{
  1179  						Name:                    pointerOf("baz"),
  1180  						PreventRescheduleOnLost: pointerOf(false),
  1181  						Count:                   pointerOf(1),
  1182  						EphemeralDisk: &EphemeralDisk{
  1183  							Sticky:  pointerOf(false),
  1184  							Migrate: pointerOf(false),
  1185  							SizeMB:  pointerOf(300),
  1186  						},
  1187  						RestartPolicy: &RestartPolicy{
  1188  							Delay:           pointerOf(20 * time.Second),
  1189  							Attempts:        pointerOf(2),
  1190  							Interval:        pointerOf(30 * time.Minute),
  1191  							Mode:            pointerOf("fail"),
  1192  							RenderTemplates: pointerOf(false),
  1193  						},
  1194  						ReschedulePolicy: &ReschedulePolicy{
  1195  							Attempts:      pointerOf(0),
  1196  							Interval:      pointerOf(time.Duration(0)),
  1197  							DelayFunction: pointerOf("exponential"),
  1198  							Delay:         pointerOf(30 * time.Second),
  1199  							MaxDelay:      pointerOf(1 * time.Hour),
  1200  							Unlimited:     pointerOf(true),
  1201  						},
  1202  						Consul: &Consul{
  1203  							Namespace: "",
  1204  							Cluster:   "default",
  1205  						},
  1206  						Update: &UpdateStrategy{
  1207  							Stagger:          pointerOf(30 * time.Second),
  1208  							MaxParallel:      pointerOf(1),
  1209  							HealthCheck:      pointerOf("checks"),
  1210  							MinHealthyTime:   pointerOf(10 * time.Second),
  1211  							HealthyDeadline:  pointerOf(5 * time.Minute),
  1212  							ProgressDeadline: pointerOf(10 * time.Minute),
  1213  							AutoRevert:       pointerOf(false),
  1214  							Canary:           pointerOf(0),
  1215  							AutoPromote:      pointerOf(false),
  1216  						},
  1217  						Migrate: DefaultMigrateStrategy(),
  1218  						Tasks: []*Task{
  1219  							{
  1220  								Name:        "task1",
  1221  								LogConfig:   DefaultLogConfig(),
  1222  								Resources:   DefaultResources(),
  1223  								KillTimeout: pointerOf(5 * time.Second),
  1224  								RestartPolicy: &RestartPolicy{
  1225  									Delay:           pointerOf(20 * time.Second),
  1226  									Attempts:        pointerOf(2),
  1227  									Interval:        pointerOf(30 * time.Minute),
  1228  									Mode:            pointerOf("fail"),
  1229  									RenderTemplates: pointerOf(false),
  1230  								},
  1231  							},
  1232  						},
  1233  					},
  1234  				},
  1235  			},
  1236  		},
  1237  		{
  1238  			name: "multiregion",
  1239  			input: &Job{
  1240  				Name:     pointerOf("foo"),
  1241  				ID:       pointerOf("bar"),
  1242  				ParentID: pointerOf("lol"),
  1243  				Multiregion: &Multiregion{
  1244  					Regions: []*MultiregionRegion{
  1245  						{
  1246  							Name:  "west",
  1247  							Count: pointerOf(1),
  1248  						},
  1249  					},
  1250  				},
  1251  			},
  1252  			expected: &Job{
  1253  				Multiregion: &Multiregion{
  1254  					Strategy: &MultiregionStrategy{
  1255  						MaxParallel: pointerOf(0),
  1256  						OnFailure:   pointerOf(""),
  1257  					},
  1258  					Regions: []*MultiregionRegion{
  1259  						{
  1260  							Name:        "west",
  1261  							Count:       pointerOf(1),
  1262  							Datacenters: []string{},
  1263  							Meta:        map[string]string{},
  1264  						},
  1265  					},
  1266  				},
  1267  				Namespace:         pointerOf(DefaultNamespace),
  1268  				ID:                pointerOf("bar"),
  1269  				Name:              pointerOf("foo"),
  1270  				Region:            pointerOf("global"),
  1271  				Type:              pointerOf("service"),
  1272  				ParentID:          pointerOf("lol"),
  1273  				Priority:          pointerOf(JobDefaultPriority),
  1274  				NodePool:          pointerOf(""),
  1275  				AllAtOnce:         pointerOf(false),
  1276  				ConsulToken:       pointerOf(""),
  1277  				ConsulNamespace:   pointerOf(""),
  1278  				VaultToken:        pointerOf(""),
  1279  				VaultNamespace:    pointerOf(""),
  1280  				NomadTokenID:      pointerOf(""),
  1281  				Stop:              pointerOf(false),
  1282  				Stable:            pointerOf(false),
  1283  				Version:           pointerOf(uint64(0)),
  1284  				Status:            pointerOf(""),
  1285  				StatusDescription: pointerOf(""),
  1286  				CreateIndex:       pointerOf(uint64(0)),
  1287  				ModifyIndex:       pointerOf(uint64(0)),
  1288  				JobModifyIndex:    pointerOf(uint64(0)),
  1289  				Update: &UpdateStrategy{
  1290  					Stagger:          pointerOf(30 * time.Second),
  1291  					MaxParallel:      pointerOf(1),
  1292  					HealthCheck:      pointerOf("checks"),
  1293  					MinHealthyTime:   pointerOf(10 * time.Second),
  1294  					HealthyDeadline:  pointerOf(5 * time.Minute),
  1295  					ProgressDeadline: pointerOf(10 * time.Minute),
  1296  					AutoRevert:       pointerOf(false),
  1297  					Canary:           pointerOf(0),
  1298  					AutoPromote:      pointerOf(false),
  1299  				},
  1300  			},
  1301  		},
  1302  	}
  1303  
  1304  	for _, tc := range testCases {
  1305  		t.Run(tc.name, func(t *testing.T) {
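        			// Canonicalize mutates the input job in place; the result should
        			// match the fully defaulted job in tc.expected.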
  1306  			tc.input.Canonicalize()
  1307  			must.Eq(t, tc.expected, tc.input)
  1308  		})
  1309  	}
  1310  }
  1311  
  1312  func TestJobs_EnforceRegister(t *testing.T) {
  1313  	testutil.Parallel(t)
  1314  
  1315  	c, s := makeClient(t, nil, nil)
  1316  	defer s.Stop()
  1317  	jobs := c.Jobs()
  1318  
  1319  	// Listing jobs before registering returns nothing
  1320  	resp, _, err := jobs.List(nil)
  1321  	must.NoError(t, err)
  1322  	must.SliceEmpty(t, resp)
  1323  
  1324  	// Create a job and attempt to register it with an incorrect index.
  1325  	job := testJob()
  1326  	resp2, _, err := jobs.EnforceRegister(job, 10, nil)
  1327  	must.ErrorContains(t, err, RegisterEnforceIndexErrPrefix)
  1328  
  1329  	// Register with index 0, which requires that the job does not already exist
  1330  	resp2, wm, err := jobs.EnforceRegister(job, 0, nil)
  1331  	must.NoError(t, err)
  1332  	must.NotNil(t, resp2)
  1333  	must.UUIDv4(t, resp2.EvalID)
  1334  	assertWriteMeta(t, wm)
  1335  
  1336  	// Query the jobs back out again
  1337  	resp, qm, err := jobs.List(nil)
  1338  	must.NoError(t, err)
  1339  	must.Len(t, 1, resp)
  1340  	must.Eq(t, *job.ID, resp[0].ID)
  1341  	assertQueryMeta(t, qm)
  1342  
  1343  	// Fail at incorrect index
  1344  	curIndex := resp[0].JobModifyIndex
  1345  	resp2, _, err = jobs.EnforceRegister(job, 123456, nil)
  1346  	must.ErrorContains(t, err, RegisterEnforceIndexErrPrefix)
  1347  
  1348  	// Works at correct index
  1349  	resp3, wm, err := jobs.EnforceRegister(job, curIndex, nil)
  1350  	must.NoError(t, err)
  1351  	must.NotNil(t, resp3)
  1352  	must.UUIDv4(t, resp3.EvalID)
  1353  	assertWriteMeta(t, wm)
  1354  }
  1355  
  1356  func TestJobs_Revert(t *testing.T) {
  1357  	testutil.Parallel(t)
  1358  
  1359  	c, s := makeClient(t, nil, nil)
  1360  	defer s.Stop()
  1361  	jobs := c.Jobs()
  1362  
  1363  	// Register twice
  1364  	job := testJob()
  1365  	resp, wm, err := jobs.Register(job, nil)
  1366  	must.NoError(t, err)
  1367  	must.UUIDv4(t, resp.EvalID)
  1368  	assertWriteMeta(t, wm)
  1369  
  1370  	job.Meta = map[string]string{"foo": "new"}
  1371  	resp, wm, err = jobs.Register(job, nil)
  1372  	must.NoError(t, err)
  1373  	must.UUIDv4(t, resp.EvalID)
  1374  	assertWriteMeta(t, wm)
  1375  
  1376  	// Fail revert at an incorrect enforced prior version
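        	// The two trailing empty strings are the optional Consul and Vault tokens.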
  1377  	_, _, err = jobs.Revert(*job.ID, 0, pointerOf(uint64(10)), nil, "", "")
  1378  	must.ErrorContains(t, err, "enforcing version")
  1379  
  1380  	// Works with the correct enforced prior version
  1381  	revertResp, wm, err := jobs.Revert(*job.ID, 0, pointerOf(uint64(1)), nil, "", "")
  1382  	must.NoError(t, err)
  1383  	must.UUIDv4(t, revertResp.EvalID)
  1384  	must.Positive(t, revertResp.EvalCreateIndex)
  1385  	must.Positive(t, revertResp.JobModifyIndex)
  1386  	assertWriteMeta(t, wm)
  1387  }
  1388  
  1389  func TestJobs_Info(t *testing.T) {
  1390  	testutil.Parallel(t)
  1391  
  1392  	c, s := makeClient(t, nil, nil)
  1393  	defer s.Stop()
  1394  	jobs := c.Jobs()
  1395  
  1396  	// Trying to retrieve a job by ID before it exists
  1397  	// returns an error
  1398  	id := "job-id/with\\troublesome:characters\n?&字"
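        	// This ID includes characters that must be URL-escaped in the request path.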
  1399  	_, _, err := jobs.Info(id, nil)
  1400  	must.ErrorContains(t, err, "not found")
  1401  
  1402  	// Register the job
  1403  	job := testJob()
  1404  	job.ID = &id
  1405  	_, wm, err := jobs.Register(job, nil)
  1406  	must.NoError(t, err)
  1407  	assertWriteMeta(t, wm)
  1408  
  1409  	// Query the job again and ensure it exists
  1410  	result, qm, err := jobs.Info(id, nil)
  1411  	must.NoError(t, err)
  1412  	assertQueryMeta(t, qm)
  1413  
  1414  	// Check that the result is what we expect
  1415  	must.Eq(t, *result.ID, *job.ID)
  1416  }
  1417  
  1418  func TestJobs_ScaleInvalidAction(t *testing.T) {
  1419  	testutil.Parallel(t)
  1420  
  1421  	c, s := makeClient(t, nil, nil)
  1422  	defer s.Stop()
  1423  	jobs := c.Jobs()
  1424  
  1425  	// Check that invalid inputs fail; want is the HTTP status code expected in the error
  1426  	tests := []struct {
  1427  		jobID string
  1428  		group string
  1429  		value int
  1430  		want  string
  1431  	}{
  1432  		{"", "", 1, "404"},
  1433  		{"i-dont-exist", "", 1, "400"},
  1434  		{"", "i-dont-exist", 1, "404"},
  1435  		{"i-dont-exist", "me-neither", 1, "404"},
  1436  	}
  1437  	for _, test := range tests {
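        		// Scale args: job ID, group, new count, message, error flag, metadata, write options.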
  1438  		_, _, err := jobs.Scale(test.jobID, test.group, &test.value, "reason", false, nil, nil)
  1439  		must.ErrorContains(t, err, test.want)
  1440  	}
  1441  
  1442  	// Register test job
  1443  	job := testJob()
  1444  	job.ID = pointerOf("TestJobs_Scale")
  1445  	_, wm, err := jobs.Register(job, nil)
  1446  	must.NoError(t, err)
  1447  	assertWriteMeta(t, wm)
  1448  
  1449  	// Perform a scaling action with bad group name, verify error
  1450  	_, _, err = jobs.Scale(*job.ID, "incorrect-group-name", pointerOf(2),
  1451  		"because", false, nil, nil)
  1452  	must.ErrorContains(t, err, "does not exist")
  1453  }
  1454  
  1455  func TestJobs_Versions(t *testing.T) {
  1456  	testutil.Parallel(t)
  1457  
  1458  	c, s := makeClient(t, nil, nil)
  1459  	defer s.Stop()
  1460  	jobs := c.Jobs()
  1461  
  1462  	// Trying to retrieve a job by ID before it exists returns an error
  1463  	_, _, _, err := jobs.Versions("job1", false, nil)
  1464  	must.ErrorContains(t, err, "not found")
  1465  
  1466  	// Register the job
  1467  	job := testJob()
  1468  	_, wm, err := jobs.Register(job, nil)
  1469  	must.NoError(t, err)
  1470  	assertWriteMeta(t, wm)
  1471  
  1472  	// Query the job versions and ensure they exist
  1473  	result, _, qm, err := jobs.Versions("job1", false, nil)
  1474  	must.NoError(t, err)
  1475  	assertQueryMeta(t, qm)
  1476  
  1477  	// Check that the result is what we expect
  1478  	must.Eq(t, *job.ID, *result[0].ID)
  1479  }
  1480  
  1481  func TestJobs_JobSubmission_Canonicalize(t *testing.T) {
  1482  	testutil.Parallel(t)
  1483  
  1484  	t.Run("nil", func(t *testing.T) {
  1485  		var js *JobSubmission
  1486  		js.Canonicalize()
  1487  		must.Nil(t, js)
  1488  	})
  1489  
  1490  	t.Run("empty variable flags", func(t *testing.T) {
  1491  		js := &JobSubmission{
  1492  			Source:        "abc123",
  1493  			Format:        "hcl2",
  1494  			VariableFlags: make(map[string]string),
  1495  		}
  1496  		js.Canonicalize()
  1497  		must.Nil(t, js.VariableFlags)
  1498  	})
  1499  }
  1500  
  1501  func TestJobs_JobSubmission_Copy(t *testing.T) {
  1502  	testutil.Parallel(t)
  1503  
  1504  	t.Run("nil", func(t *testing.T) {
  1505  		var js *JobSubmission
  1506  		c := js.Copy()
  1507  		must.Nil(t, c)
  1508  	})
  1509  
  1510  	t.Run("copy", func(t *testing.T) {
  1511  		js := &JobSubmission{
  1512  			Source:        "source",
  1513  			Format:        "format",
  1514  			VariableFlags: map[string]string{"foo": "bar"},
  1515  			Variables:     "variables",
  1516  		}
  1517  		c := js.Copy()
  1518  		c.Source = "source2"
  1519  		c.Format = "format2"
  1520  		c.VariableFlags["foo"] = "baz"
  1521  		c.Variables = "variables2"
  1522  		must.Eq(t, &JobSubmission{
  1523  			Source:        "source",
  1524  			Format:        "format",
  1525  			VariableFlags: map[string]string{"foo": "bar"},
  1526  			Variables:     "variables",
  1527  		}, js)
  1528  	})
  1529  }
  1530  
  1531  func TestJobs_Submission_versions(t *testing.T) {
  1532  	testutil.Parallel(t)
  1533  
  1534  	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true })
  1535  	t.Cleanup(s.Stop)
  1536  
  1537  	jobs := c.Jobs()
  1538  
  1539  	job := testJob()
  1540  	jobID := *job.ID                       // job1
  1541  	job.TaskGroups[0].Count = pointerOf(0) // no need to actually run
  1542  
  1543  	// trying to retrieve a version before job is submitted returns a Not Found
  1544  	_, _, nfErr := jobs.Submission(jobID, 0, nil)
  1545  	must.ErrorContains(t, nfErr, "job source not found")
  1546  
  1547  	// register our test job at version 0
  1548  	job.Meta = map[string]string{"v": "0"}
  1549  	_, wm, regErr := jobs.RegisterOpts(job, &RegisterOptions{
  1550  		Submission: &JobSubmission{
  1551  			Source:        "the job source v0",
  1552  			Format:        "hcl2",
  1553  			VariableFlags: map[string]string{"X": "x", "Y": "42", "Z": "true"},
  1554  			Variables:     "var file content",
  1555  		},
  1556  	}, nil)
  1557  	must.NoError(t, regErr)
  1558  	assertWriteMeta(t, wm)
  1559  
  1560  	expectSubmission := func(sub *JobSubmission, format, source, vars string, flags map[string]string) {
  1561  		must.NotNil(t, sub, must.Sprintf("expected a non-nil job submission for job %s", jobID))
  1562  		must.Eq(t, format, sub.Format)
  1563  		must.Eq(t, source, sub.Source)
  1564  		must.Eq(t, vars, sub.Variables)
  1565  		must.MapEq(t, flags, sub.VariableFlags)
  1566  	}
  1567  
  1568  	// we should have a version 0 now
  1569  	sub, _, err := jobs.Submission(jobID, 0, nil)
  1570  	must.NoError(t, err)
  1571  	expectSubmission(sub, "hcl2", "the job source v0", "var file content", map[string]string{"X": "x", "Y": "42", "Z": "true"})
  1572  
  1573  	// register our test job at version 1
  1574  	job.Meta = map[string]string{"v": "1"}
  1575  	_, wm, regErr = jobs.RegisterOpts(job, &RegisterOptions{
  1576  		Submission: &JobSubmission{
  1577  			Source:        "the job source v1",
  1578  			Format:        "hcl2",
  1579  			VariableFlags: nil,
  1580  			Variables:     "different var content",
  1581  		},
  1582  	}, nil)
  1583  	must.NoError(t, regErr)
  1584  	assertWriteMeta(t, wm)
  1585  
  1586  	// we should have a version 1 now
  1587  	sub, _, err = jobs.Submission(jobID, 1, nil)
  1588  	must.NoError(t, err)
  1589  	expectSubmission(sub, "hcl2", "the job source v1", "different var content", nil)
  1590  
  1591  	// if we query for version 0 we should still have it
  1592  	sub, _, err = jobs.Submission(jobID, 0, nil)
  1593  	must.NoError(t, err)
  1594  	expectSubmission(sub, "hcl2", "the job source v0", "var file content", map[string]string{"X": "x", "Y": "42", "Z": "true"})
  1595  
  1596  	// deregister (and purge) the job
  1597  	_, _, err = jobs.Deregister(jobID, true, &WriteOptions{Namespace: "default"})
  1598  	must.NoError(t, err)
  1599  
  1600  	// now if we query for a submission of v0 it will be gone
  1601  	sub, _, err = jobs.Submission(jobID, 0, nil)
  1602  	must.ErrorContains(t, err, "job source not found")
  1603  	must.Nil(t, sub)
  1604  
  1605  	// same for the v1 submission
  1606  	sub, _, err = jobs.Submission(jobID, 1, nil)
  1607  	must.ErrorContains(t, err, "job source not found")
  1608  	must.Nil(t, sub)
  1609  }
  1610  
  1611  func TestJobs_Submission_namespaces(t *testing.T) {
  1612  	testutil.Parallel(t)
  1613  
  1614  	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true })
  1615  	t.Cleanup(s.Stop)
  1616  
  1617  	first := &Namespace{
  1618  		Name:        "first",
  1619  		Description: "first namespace",
  1620  	}
  1621  
  1622  	second := &Namespace{
  1623  		Name:        "second",
  1624  		Description: "second namespace",
  1625  	}
  1626  
  1627  	// create two namespaces
  1628  	namespaces := c.Namespaces()
  1629  	_, err := namespaces.Register(first, nil)
  1630  	must.NoError(t, err)
  1631  	_, err = namespaces.Register(second, nil)
  1632  	must.NoError(t, err)
  1633  
  1634  	jobs := c.Jobs()
  1635  
  1636  	// use the same jobID to prove we can query submissions of the same ID but
  1637  	// in different namespaces
  1638  	commonJobID := "common"
  1639  
  1640  	job := testJob()
  1641  	job.ID = pointerOf(commonJobID)
  1642  	job.TaskGroups[0].Count = pointerOf(0)
  1643  
  1644  	// register our test job into first namespace
  1645  	_, wm, err := jobs.RegisterOpts(job, &RegisterOptions{
  1646  		Submission: &JobSubmission{
  1647  			Source: "the job source",
  1648  			Format: "hcl2",
  1649  		},
  1650  	}, &WriteOptions{Namespace: "first"})
  1651  	must.NoError(t, err)
  1652  	assertWriteMeta(t, wm)
  1653  
  1654  	// if we query in the default namespace the submission should not exist
  1655  	sub, _, err := jobs.Submission(commonJobID, 0, nil)
  1656  	must.ErrorContains(t, err, "not found")
  1657  	must.Nil(t, sub)
  1658  
  1659  	// if we query in the first namespace we expect to get the submission
  1660  	sub, _, err = jobs.Submission(commonJobID, 0, &QueryOptions{Namespace: "first"})
  1661  	must.NoError(t, err)
  1662  	must.Eq(t, "the job source", sub.Source)
  1663  
  1664  	// if we query in the second namespace the submission should not exist
  1665  	sub, _, err = jobs.Submission(commonJobID, 0, &QueryOptions{Namespace: "second"})
  1666  	must.ErrorContains(t, err, "not found")
  1667  	must.Nil(t, sub)
  1668  
  1669  	// create a second test job for our second namespace
  1670  	job2 := testJob()
  1671  	job2.ID = pointerOf(commonJobID)
  1672  	// keep job name redis to prove we write to correct namespace
  1673  	job2.TaskGroups[0].Count = pointerOf(0)
  1674  
  1675  	// register our second job into the second namespace
  1676  	_, wm, err = jobs.RegisterOpts(job2, &RegisterOptions{
  1677  		Submission: &JobSubmission{
  1678  			Source: "second job source",
  1679  			Format: "hcl1",
  1680  		},
  1681  	}, &WriteOptions{Namespace: "second"})
  1682  	must.NoError(t, err)
  1683  	assertWriteMeta(t, wm)
  1684  
  1685  	// if we query in the default namespace the submission should not exist
  1686  	sub, _, err = jobs.Submission(commonJobID, 0, nil)
  1687  	must.ErrorContains(t, err, "not found")
  1688  	must.Nil(t, sub)
  1689  
  1690  	// if we query in the first namespace we expect to get the first job submission
  1691  	sub, _, err = jobs.Submission(commonJobID, 0, &QueryOptions{Namespace: "first"})
  1692  	must.NoError(t, err)
  1693  	must.Eq(t, "the job source", sub.Source)
  1694  
  1695  	// if we query in the second namespace we expect the second job submission
  1696  	sub, _, err = jobs.Submission(commonJobID, 0, &QueryOptions{Namespace: "second"})
  1697  	must.NoError(t, err)
  1698  	must.Eq(t, "second job source", sub.Source)
  1699  
  1700  	// if we query v1 in the first namespace we expect nothing
  1701  	sub, _, err = jobs.Submission(commonJobID, 1, &QueryOptions{Namespace: "first"})
  1702  	must.ErrorContains(t, err, "not found")
  1703  	must.Nil(t, sub)
  1704  
  1705  	// if we query v1 in the second namespace we expect nothing
  1706  	sub, _, err = jobs.Submission(commonJobID, 1, &QueryOptions{Namespace: "second"})
  1707  	must.ErrorContains(t, err, "not found")
  1708  	must.Nil(t, sub)
  1709  }
  1710  
  1711  func TestJobs_Submission_delete(t *testing.T) {
  1712  	testutil.Parallel(t)
  1713  
  1714  	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true })
  1715  	t.Cleanup(s.Stop)
  1716  
  1717  	first := &Namespace{
  1718  		Name:        "first",
  1719  		Description: "first namespace",
  1720  	}
  1721  
  1722  	namespaces := c.Namespaces()
  1723  	_, err := namespaces.Register(first, nil)
  1724  	must.NoError(t, err)
  1725  
  1726  	jobs := c.Jobs()
  1727  	job := testJob()
  1728  	jobID := *job.ID
  1729  	job.TaskGroups[0].Count = pointerOf(0)
  1730  	job.Meta = map[string]string{"version": "0"}
  1731  
  1732  	// register our test job into first namespace
  1733  	_, wm, err := jobs.RegisterOpts(job, &RegisterOptions{
  1734  		Submission: &JobSubmission{
  1735  			Source: "the job source v0",
  1736  			Format: "hcl2",
  1737  		},
  1738  	}, &WriteOptions{Namespace: "first"})
  1739  	must.NoError(t, err)
  1740  	assertWriteMeta(t, wm)
  1741  
  1742  	// modify the job and register it again
  1743  	job.Meta["version"] = "1"
  1744  	_, wm, err = jobs.RegisterOpts(job, &RegisterOptions{
  1745  		Submission: &JobSubmission{
  1746  			Source: "the job source v1",
  1747  			Format: "hcl2",
  1748  		},
  1749  	}, &WriteOptions{Namespace: "first"})
  1750  	must.NoError(t, err)
  1751  	assertWriteMeta(t, wm)
  1752  
  1753  	// ensure we have our submissions for both versions
  1754  	sub, _, err := jobs.Submission(jobID, 0, &QueryOptions{Namespace: "first"})
  1755  	must.NoError(t, err)
  1756  	must.Eq(t, "the job source v0", sub.Source)
  1757  
  1758  	sub, _, err = jobs.Submission(jobID, 1, &QueryOptions{Namespace: "first"})
  1759  	must.NoError(t, err)
  1760  	must.Eq(t, "the job source v1", sub.Source)
  1761  
  1762  	// deregister (and purge) the job
  1763  	_, _, err = jobs.Deregister(jobID, true, &WriteOptions{Namespace: "first"})
  1764  	must.NoError(t, err)
  1765  
  1766  	// ensure all submissions for the job are gone
  1767  	sub, _, err = jobs.Submission(jobID, 0, &QueryOptions{Namespace: "first"})
  1768  	must.ErrorContains(t, err, "job source not found")
  1769  	must.Nil(t, sub)
  1770  
  1771  	sub, _, err = jobs.Submission(jobID, 1, &QueryOptions{Namespace: "first"})
  1772  	must.ErrorContains(t, err, "job source not found")
  1773  	must.Nil(t, sub)
  1774  }
  1775  
  1776  func TestJobs_PrefixList(t *testing.T) {
  1777  	testutil.Parallel(t)
  1778  
  1779  	c, s := makeClient(t, nil, nil)
  1780  	defer s.Stop()
  1781  	jobs := c.Jobs()
  1782  
  1783  	// Listing when nothing exists returns empty
  1784  	results, _, err := jobs.PrefixList("dummy")
  1785  	must.NoError(t, err)
  1786  	must.SliceEmpty(t, results)
  1787  
  1788  	// Register the job
  1789  	job := testJob()
  1790  	_, wm, err := jobs.Register(job, nil)
  1791  	must.NoError(t, err)
  1792  	assertWriteMeta(t, wm)
  1793  
  1794  	// Query the job again using its ID prefix
  1795  	// and ensure it is returned in the list
  1796  	results, _, err = jobs.PrefixList((*job.ID)[:1])
  1797  	must.NoError(t, err)
  1798  
  1799  	// Check if we have the right list
  1800  	must.Len(t, 1, results)
  1801  	must.Eq(t, *job.ID, results[0].ID)
  1802  }
  1803  
  1804  func TestJobs_List(t *testing.T) {
  1805  	testutil.Parallel(t)
  1806  
  1807  	c, s := makeClient(t, nil, nil)
  1808  	defer s.Stop()
  1809  	jobs := c.Jobs()
  1810  
  1811  	// Listing when nothing exists returns empty
  1812  	results, _, err := jobs.List(nil)
  1813  	must.NoError(t, err)
  1814  	must.SliceEmpty(t, results)
  1815  
  1816  	// Register the job
  1817  	job := testJob()
  1818  	_, wm, err := jobs.Register(job, nil)
  1819  	must.NoError(t, err)
  1820  	assertWriteMeta(t, wm)
  1821  
  1822  	// Query the jobs again now that one is registered
  1823  	// and ensure it shows up in the list
  1824  	results, _, err = jobs.List(nil)
  1825  	must.NoError(t, err)
  1826  
  1827  	// Check if we have the right list
  1828  	must.Len(t, 1, results)
  1829  	must.Eq(t, *job.ID, results[0].ID)
  1830  }
  1831  
  1832  func TestJobs_Allocations(t *testing.T) {
  1833  	testutil.Parallel(t)
  1834  
  1835  	c, s := makeClient(t, nil, nil)
  1836  	defer s.Stop()
  1837  	jobs := c.Jobs()
  1838  
  1839  	// Looking up by a nonexistent job returns nothing
  1840  	allocs, qm, err := jobs.Allocations("job1", true, nil)
  1841  	must.NoError(t, err)
  1842  	must.Zero(t, qm.LastIndex)
  1843  	must.SliceEmpty(t, allocs)
  1844  
  1845  	// TODO: do something here to create some allocations for an existing
  1846  	// job and look them up again; a hedged sketch follows this function.
  1847  }
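
// The following is a hedged, illustrative sketch of the TODO above, not part
// of the upstream suite. It assumes a dev-mode agent (server plus client in
// one process, as the submission tests use) and an environment where the test
// job's task driver can be placed; it then waits for Jobs().Allocations to
// report at least one allocation for the job.
func TestJobs_Allocations_sketch(t *testing.T) {
	testutil.Parallel(t)

	c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true })
	t.Cleanup(s.Stop)
	jobs := c.Jobs()

	// Register a job so the scheduler has something to place.
	job := testJob()
	_, wm, err := jobs.Register(job, nil)
	must.NoError(t, err)
	assertWriteMeta(t, wm)

	// Poll until at least one allocation for the job is visible.
	f := func() error {
		allocs, _, err := jobs.Allocations(*job.ID, true, nil)
		if err != nil {
			return fmt.Errorf("failed to list allocations: %w", err)
		}
		if len(allocs) == 0 {
			return fmt.Errorf("no allocations for job %q yet", *job.ID)
		}
		return nil
	}
	must.Wait(t, wait.InitialSuccess(
		wait.ErrorFunc(f),
		wait.Timeout(10*time.Second),
		wait.Gap(1*time.Second),
	))
}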
  1848  
  1849  func TestJobs_Evaluations(t *testing.T) {
  1850  	testutil.Parallel(t)
  1851  
  1852  	c, s := makeClient(t, nil, nil)
  1853  	defer s.Stop()
  1854  	jobs := c.Jobs()
  1855  
  1856  	// Looking up by a nonexistent job ID returns nothing
  1857  	evals, qm, err := jobs.Evaluations("job1", nil)
  1858  	must.NoError(t, err)
  1859  	must.Zero(t, qm.LastIndex)
  1860  	must.SliceEmpty(t, evals)
  1861  
  1862  	// Insert a job. This also creates an evaluation so we should
  1863  	// be able to query that out after.
  1864  	job := testJob()
  1865  	resp, wm, err := jobs.Register(job, nil)
  1866  	must.NoError(t, err)
  1867  	assertWriteMeta(t, wm)
  1868  
  1869  	// Look up the evaluations again.
  1870  	evals, qm, err = jobs.Evaluations("job1", nil)
  1871  	must.NoError(t, err)
  1872  	assertQueryMeta(t, qm)
  1873  
  1874  	// Check that we got the evals back. Evals are ordered most recent to
  1875  	// least recent, so the last eval is the original registration eval.
  1876  	must.Positive(t, len(evals))
  1877  	idx := len(evals) - 1
  1878  	must.Eq(t, resp.EvalID, evals[idx].ID)
  1879  }
  1880  
  1881  func TestJobs_Deregister(t *testing.T) {
  1882  	testutil.Parallel(t)
  1883  
  1884  	c, s := makeClient(t, nil, nil)
  1885  	defer s.Stop()
  1886  	jobs := c.Jobs()
  1887  
  1888  	// Register a new job
  1889  	job := testJob()
  1890  	_, wm, err := jobs.Register(job, nil)
  1891  	must.NoError(t, err)
  1892  	assertWriteMeta(t, wm)
  1893  
  1894  	// Attempting to delete a nonexistent job does not return an error
  1895  	_, _, err = jobs.Deregister("nope", false, nil)
  1896  	must.NoError(t, err)
  1897  
  1898  	// Do a soft deregister of an existing job
  1899  	evalID, wm3, err := jobs.Deregister("job1", false, nil)
  1900  	must.NoError(t, err)
  1901  	assertWriteMeta(t, wm3)
  1902  	must.UUIDv4(t, evalID)
  1903  
  1904  	// Check that the job is still queryable
  1905  	out, qm1, err := jobs.Info("job1", nil)
  1906  	must.NoError(t, err)
  1907  	assertQueryMeta(t, qm1)
  1908  	must.NotNil(t, out)
  1909  
  1910  	// Do a purge deregister of an existing job
  1911  	evalID, wm4, err := jobs.Deregister("job1", true, nil)
  1912  	must.NoError(t, err)
  1913  
  1914  	assertWriteMeta(t, wm4)
  1915  	must.UUIDv4(t, evalID)
  1916  
  1917  	// Check that the job is really gone
  1918  	result, qm, err := jobs.List(nil)
  1919  	must.NoError(t, err)
  1920  
  1921  	assertQueryMeta(t, qm)
  1922  	must.SliceEmpty(t, result)
  1923  }
  1924  
  1925  func TestJobs_Deregister_EvalPriority(t *testing.T) {
  1926  	testutil.Parallel(t)
  1927  
  1928  	c, s := makeClient(t, nil, nil)
  1929  	defer s.Stop()
  1930  
  1931  	// Listing jobs before registering returns nothing
  1932  	listResp, _, err := c.Jobs().List(nil)
  1933  	must.NoError(t, err)
  1934  	must.SliceEmpty(t, listResp)
  1935  
  1936  	// Create a job and register it.
  1937  	job := testJob()
  1938  	registerResp, wm, err := c.Jobs().Register(job, nil)
  1939  	must.NoError(t, err)
  1940  	must.NotNil(t, registerResp)
  1941  	must.UUIDv4(t, registerResp.EvalID)
  1942  	assertWriteMeta(t, wm)
  1943  
  1944  	// Deregister the job with an eval priority.
  1945  	evalID, _, err := c.Jobs().DeregisterOpts(*job.ID, &DeregisterOptions{EvalPriority: 97}, nil)
  1946  	must.NoError(t, err)
  1947  	must.UUIDv4(t, evalID)
  1948  
  1949  	// Lookup the eval and check the priority on it.
  1950  	evalInfo, _, err := c.Evaluations().Info(evalID, nil)
  1951  	must.NoError(t, err)
  1952  	must.Eq(t, 97, evalInfo.Priority)
  1953  }
  1954  
  1955  func TestJobs_Deregister_NoEvalPriority(t *testing.T) {
  1956  	testutil.Parallel(t)
  1957  
  1958  	c, s := makeClient(t, nil, nil)
  1959  	defer s.Stop()
  1960  
  1961  	// Listing jobs before registering returns nothing
  1962  	listResp, _, err := c.Jobs().List(nil)
  1963  	must.NoError(t, err)
  1964  	must.SliceEmpty(t, listResp)
  1965  
  1966  	// Create a job and register it.
  1967  	job := testJob()
  1968  	registerResp, wm, err := c.Jobs().Register(job, nil)
  1969  	must.NoError(t, err)
  1970  	must.NotNil(t, registerResp)
  1971  	must.UUIDv4(t, registerResp.EvalID)
  1972  	assertWriteMeta(t, wm)
  1973  
  1974  	// Deregister the job with an eval priority.
  1975  	evalID, _, err := c.Jobs().DeregisterOpts(*job.ID, &DeregisterOptions{}, nil)
  1976  	must.NoError(t, err)
  1977  	must.UUIDv4(t, evalID)
  1978  
  1979  	// Lookup the eval and check the priority on it.
  1980  	evalInfo, _, err := c.Evaluations().Info(evalID, nil)
  1981  	must.NoError(t, err)
  1982  	must.Eq(t, *job.Priority, evalInfo.Priority)
  1983  }
  1984  
  1985  func TestJobs_ForceEvaluate(t *testing.T) {
  1986  	testutil.Parallel(t)
  1987  
  1988  	c, s := makeClient(t, nil, nil)
  1989  	defer s.Stop()
  1990  	jobs := c.Jobs()
  1991  
  1992  	// Force-eval on a non-existent job fails
  1993  	_, _, err := jobs.ForceEvaluate("job1", nil)
  1994  	must.ErrorContains(t, err, "not found")
  1995  
  1996  	// Create a new job
  1997  	_, wm, err := jobs.Register(testJob(), nil)
  1998  	must.NoError(t, err)
  1999  	assertWriteMeta(t, wm)
  2000  
  2001  	// Try force-eval again
  2002  	evalID, wm, err := jobs.ForceEvaluate("job1", nil)
  2003  	must.NoError(t, err)
  2004  	assertWriteMeta(t, wm)
  2005  
  2006  	// Retrieve the evals and see if we get a matching one
  2007  	evals, qm, err := jobs.Evaluations("job1", nil)
  2008  	must.NoError(t, err)
  2009  	assertQueryMeta(t, qm)
  2010  
  2011  	// todo(shoenig) fix must.SliceContainsFunc and use that
  2012  	// https://github.com/shoenig/test/issues/88
  2013  	for _, eval := range evals {
  2014  		if eval.ID == evalID {
  2015  			return
  2016  		}
  2017  	}
  2018  	t.Fatalf("evaluation %q missing", evalID)
  2019  }
  2020  
  2021  func TestJobs_PeriodicForce(t *testing.T) {
  2022  	testutil.Parallel(t)
  2023  
  2024  	c, s := makeClient(t, nil, nil)
  2025  	defer s.Stop()
  2026  
  2027  	jobs := c.Jobs()
  2028  
  2029  	// Periodic force on a nonexistent job fails
  2030  	_, _, err := jobs.PeriodicForce("job1", nil)
  2031  	must.ErrorContains(t, err, "not found")
  2032  
  2033  	// Create a new job
  2034  	job := testPeriodicJob()
  2035  	_, _, err = jobs.Register(job, nil)
  2036  	must.NoError(t, err)
  2037  
  2038  	f := func() error {
  2039  		out, _, err := jobs.Info(*job.ID, nil)
  2040  		if err != nil {
  2041  			return fmt.Errorf("failed to get jobs info: %w", err)
  2042  		}
  2043  		if out == nil {
  2044  			return fmt.Errorf("jobs info response is nil")
  2045  		}
  2046  		if *out.ID != *job.ID {
  2047  			return fmt.Errorf("expected job ids to match, out: %s, job: %s", *out.ID, *job.ID)
  2048  		}
  2049  		return nil
  2050  	}
  2051  	must.Wait(t, wait.InitialSuccess(
  2052  		wait.ErrorFunc(f),
  2053  		wait.Timeout(10*time.Second),
  2054  		wait.Gap(1*time.Second),
  2055  	))
  2056  
  2057  	// Try force again
  2058  	evalID, wm, err := jobs.PeriodicForce(*job.ID, nil)
  2059  	must.NoError(t, err)
  2060  
  2061  	assertWriteMeta(t, wm)
  2062  
  2063  	must.NotEq(t, "", evalID)
  2064  
  2065  	// Retrieve the eval
  2066  	evaluations := c.Evaluations()
  2067  	eval, qm, err := evaluations.Info(evalID, nil)
  2068  	must.NoError(t, err)
  2069  
  2070  	assertQueryMeta(t, qm)
  2071  	must.Eq(t, eval.ID, evalID)
  2072  }
  2073  
  2074  func TestJobs_Plan(t *testing.T) {
  2075  	testutil.Parallel(t)
  2076  
  2077  	c, s := makeClient(t, nil, nil)
  2078  	defer s.Stop()
  2079  	jobs := c.Jobs()
  2080  
  2081  	// Create a job and attempt to register it
  2082  	job := testJob()
  2083  	resp, wm, err := jobs.Register(job, nil)
  2084  	must.NoError(t, err)
  2085  	must.UUIDv4(t, resp.EvalID)
  2086  	assertWriteMeta(t, wm)
  2087  
  2088  	// Check that passing a nil job fails
  2089  	_, _, err = jobs.Plan(nil, true, nil)
  2090  	must.Error(t, err)
  2091  
  2092  	// Check that passing a nil job ID fails
  2093  	invalidJob := testJob()
  2094  	invalidJob.ID = nil
  2095  	_, _, err = jobs.Plan(invalidJob, true, nil)
  2096  	must.Error(t, err)
  2097  
  2098  	// Make a plan request
  2099  	planResp, wm, err := jobs.Plan(job, true, nil)
  2100  	must.NoError(t, err)
  2101  	must.NotNil(t, planResp)
  2102  	must.Positive(t, planResp.JobModifyIndex)
  2103  	must.NotNil(t, planResp.Diff)
  2104  	must.NotNil(t, planResp.Annotations)
  2105  	must.SliceNotEmpty(t, planResp.CreatedEvals)
  2106  	assertWriteMeta(t, wm)
  2107  
  2108  	// Make a plan request w/o the diff
  2109  	planResp, wm, err = jobs.Plan(job, false, nil)
  2110  	must.NoError(t, err)
  2111  	must.NotNil(t, planResp)
  2112  	assertWriteMeta(t, wm)
  2113  	must.Positive(t, planResp.JobModifyIndex)
  2114  	must.Nil(t, planResp.Diff)
  2115  	must.NotNil(t, planResp.Annotations)
  2116  	must.SliceNotEmpty(t, planResp.CreatedEvals)
  2117  }
  2118  
  2119  func TestJobs_JobSummary(t *testing.T) {
  2120  	testutil.Parallel(t)
  2121  
  2122  	c, s := makeClient(t, nil, nil)
  2123  	defer s.Stop()
  2124  	jobs := c.Jobs()
  2125  
  2126  	// Trying to retrieve a job summary before the job exists
  2127  	// returns an error
  2128  	_, _, err := jobs.Summary("job1", nil)
  2129  	must.ErrorContains(t, err, "not found")
  2130  
  2131  	// Register the job
  2132  	job := testJob()
  2133  	groupName := job.TaskGroups[0].Name
  2134  	_, wm, err := jobs.Register(job, nil)
  2135  	must.NoError(t, err)
  2136  	assertWriteMeta(t, wm)
  2137  
  2138  	// Query the job summary again and ensure it exists
  2139  	result, qm, err := jobs.Summary("job1", nil)
  2140  	must.NoError(t, err)
  2141  	assertQueryMeta(t, qm)
  2142  
  2143  	// Check that the result is what we expect
  2144  	must.Eq(t, *job.ID, result.JobID)
  2145  
  2146  	_, ok := result.Summary[*groupName]
  2147  	must.True(t, ok)
  2148  }
  2149  
  2150  func TestJobs_NewBatchJob(t *testing.T) {
  2151  	testutil.Parallel(t)
  2152  
  2153  	job := NewBatchJob("job1", "myjob", "global", 5)
  2154  	expect := &Job{
  2155  		Region:   pointerOf("global"),
  2156  		ID:       pointerOf("job1"),
  2157  		Name:     pointerOf("myjob"),
  2158  		Type:     pointerOf(JobTypeBatch),
  2159  		Priority: pointerOf(5),
  2160  	}
  2161  	must.Eq(t, expect, job)
  2162  }
  2163  
  2164  func TestJobs_NewServiceJob(t *testing.T) {
  2165  	testutil.Parallel(t)
  2166  
  2167  	job := NewServiceJob("job1", "myjob", "global", 5)
  2168  	expect := &Job{
  2169  		Region:   pointerOf("global"),
  2170  		ID:       pointerOf("job1"),
  2171  		Name:     pointerOf("myjob"),
  2172  		Type:     pointerOf(JobTypeService),
  2173  		Priority: pointerOf(5),
  2174  	}
  2175  	must.Eq(t, expect, job)
  2176  }
  2177  
  2178  func TestJobs_NewSystemJob(t *testing.T) {
  2179  	testutil.Parallel(t)
  2180  
  2181  	job := NewSystemJob("job1", "myjob", "global", 5)
  2182  	expect := &Job{
  2183  		Region:   pointerOf("global"),
  2184  		ID:       pointerOf("job1"),
  2185  		Name:     pointerOf("myjob"),
  2186  		Type:     pointerOf(JobTypeSystem),
  2187  		Priority: pointerOf(5),
  2188  	}
  2189  	must.Eq(t, expect, job)
  2190  }
  2191  
  2192  func TestJobs_NewSysbatchJob(t *testing.T) {
  2193  	testutil.Parallel(t)
  2194  
  2195  	job := NewSysbatchJob("job1", "myjob", "global", 5)
  2196  	expect := &Job{
  2197  		Region:   pointerOf("global"),
  2198  		ID:       pointerOf("job1"),
  2199  		Name:     pointerOf("myjob"),
  2200  		Type:     pointerOf(JobTypeSysbatch),
  2201  		Priority: pointerOf(5),
  2202  	}
  2203  	must.Eq(t, expect, job)
  2204  }
  2205  
  2206  func TestJobs_SetMeta(t *testing.T) {
  2207  	testutil.Parallel(t)
  2208  	job := &Job{Meta: nil}
  2209  
  2210  	// Initializes a nil map
  2211  	out := job.SetMeta("foo", "bar")
  2212  	must.NotNil(t, job.Meta)
  2213  
  2214  	// Check that the job was returned
  2215  	must.Eq(t, out, job)
  2216  
  2217  	// Setting another pair is additive
  2218  	job.SetMeta("baz", "zip")
  2219  	expect := map[string]string{"foo": "bar", "baz": "zip"}
  2220  	must.Eq(t, expect, job.Meta)
  2221  }
  2222  
  2223  func TestJobs_Constrain(t *testing.T) {
  2224  	testutil.Parallel(t)
  2225  
  2226  	job := &Job{Constraints: nil}
  2227  
  2228  	// Create and add a constraint
  2229  	out := job.Constrain(NewConstraint("kernel.name", "=", "darwin"))
  2230  	must.Len(t, 1, job.Constraints)
  2231  
  2232  	// Check that the job was returned
  2233  	must.Eq(t, job, out)
  2234  
  2235  	// Adding another constraint preserves the original
  2236  	job.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
  2237  	expect := []*Constraint{
  2238  		{
  2239  			LTarget: "kernel.name",
  2240  			RTarget: "darwin",
  2241  			Operand: "=",
  2242  		},
  2243  		{
  2244  			LTarget: "memory.totalbytes",
  2245  			RTarget: "128000000",
  2246  			Operand: ">=",
  2247  		},
  2248  	}
  2249  	must.Eq(t, expect, job.Constraints)
  2250  }
  2251  
  2252  func TestJobs_AddAffinity(t *testing.T) {
  2253  	testutil.Parallel(t)
  2254  
  2255  	job := &Job{Affinities: nil}
  2256  
  2257  	// Create and add an affinity
  2258  	out := job.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
  2259  	must.Len(t, 1, job.Affinities)
  2260  
  2261  	// Check that the job was returned
  2262  	must.Eq(t, job, out)
  2263  
  2264  	// Adding another affinity preserves the original
  2265  	job.AddAffinity(NewAffinity("${node.datacenter}", "=", "dc2", 50))
  2266  	expect := []*Affinity{
  2267  		{
  2268  			LTarget: "kernel.version",
  2269  			RTarget: "4.6",
  2270  			Operand: "=",
  2271  			Weight:  pointerOf(int8(100)),
  2272  		},
  2273  		{
  2274  			LTarget: "${node.datacenter}",
  2275  			RTarget: "dc2",
  2276  			Operand: "=",
  2277  			Weight:  pointerOf(int8(50)),
  2278  		},
  2279  	}
  2280  	must.Eq(t, expect, job.Affinities)
  2281  }
  2282  
  2283  func TestJobs_Sort(t *testing.T) {
  2284  	testutil.Parallel(t)
  2285  
  2286  	jobs := []*JobListStub{
  2287  		{ID: "job2"},
  2288  		{ID: "job0"},
  2289  		{ID: "job1"},
  2290  	}
  2291  	sort.Sort(JobIDSort(jobs))
  2292  
  2293  	expect := []*JobListStub{
  2294  		{ID: "job0"},
  2295  		{ID: "job1"},
  2296  		{ID: "job2"},
  2297  	}
  2298  	must.Eq(t, expect, jobs)
  2299  }
  2300  
  2301  func TestJobs_AddSpread(t *testing.T) {
  2302  	testutil.Parallel(t)
  2303  
  2304  	job := &Job{Spreads: nil}
  2305  
  2306  	// Create and add a Spread
  2307  	spreadTarget := NewSpreadTarget("r1", 50)
  2308  
  2309  	spread := NewSpread("${meta.rack}", 100, []*SpreadTarget{spreadTarget})
  2310  	out := job.AddSpread(spread)
  2311  	must.Len(t, 1, job.Spreads)
  2312  
  2313  	// Check that the job was returned
  2314  	must.Eq(t, job, out)
  2315  
  2316  	// Adding another spread preserves the original
  2317  	spreadTarget2 := NewSpreadTarget("dc1", 100)
  2318  
  2319  	spread2 := NewSpread("${node.datacenter}", 100, []*SpreadTarget{spreadTarget2})
  2320  	job.AddSpread(spread2)
  2321  
  2322  	expect := []*Spread{
  2323  		{
  2324  			Attribute: "${meta.rack}",
  2325  			Weight:    pointerOf(int8(100)),
  2326  			SpreadTarget: []*SpreadTarget{
  2327  				{
  2328  					Value:   "r1",
  2329  					Percent: 50,
  2330  				},
  2331  			},
  2332  		},
  2333  		{
  2334  			Attribute: "${node.datacenter}",
  2335  			Weight:    pointerOf(int8(100)),
  2336  			SpreadTarget: []*SpreadTarget{
  2337  				{
  2338  					Value:   "dc1",
  2339  					Percent: 100,
  2340  				},
  2341  			},
  2342  		},
  2343  	}
  2344  	must.Eq(t, expect, job.Spreads)
  2345  }
  2346  
  2347  // TestJobs_ScaleAction tests the scale target for task group count
  2348  func TestJobs_ScaleAction(t *testing.T) {
  2349  	testutil.Parallel(t)
  2350  
  2351  	c, s := makeClient(t, nil, nil)
  2352  	defer s.Stop()
  2353  	jobs := c.Jobs()
  2354  
  2355  	id := "job-id/with\\troublesome:characters\n?&字"
  2356  	job := testJobWithScalingPolicy()
  2357  	job.ID = &id
  2358  	groupName := *job.TaskGroups[0].Name
  2359  	origCount := *job.TaskGroups[0].Count
  2360  	newCount := origCount + 1
  2361  
  2362  	// Trying to scale against a target before it exists returns an error
  2363  	_, _, err := jobs.Scale(id, "missing", pointerOf(newCount), "this won't work", false, nil, nil)
  2364  	must.ErrorContains(t, err, "not found")
  2365  
  2366  	// Register the job
  2367  	regResp, wm, err := jobs.Register(job, nil)
  2368  	must.NoError(t, err)
  2369  	assertWriteMeta(t, wm)
  2370  
  2371  	// Perform scaling action
  2372  	scalingResp, wm, err := jobs.Scale(id, groupName,
  2373  		pointerOf(newCount), "need more instances", false,
  2374  		map[string]interface{}{
  2375  			"meta": "data",
  2376  		}, nil)
  2377  
  2378  	must.NoError(t, err)
  2379  	must.NotNil(t, scalingResp)
  2380  	must.UUIDv4(t, scalingResp.EvalID)
  2381  	must.Positive(t, scalingResp.EvalCreateIndex)
  2382  	must.Greater(t, regResp.JobModifyIndex, scalingResp.JobModifyIndex)
  2383  	assertWriteMeta(t, wm)
  2384  
  2385  	// Query the job again
  2386  	resp, _, err := jobs.Info(*job.ID, nil)
  2387  	must.NoError(t, err)
  2388  	must.Eq(t, *resp.TaskGroups[0].Count, newCount)
  2389  
  2390  	// Check for the scaling event
  2391  	status, _, err := jobs.ScaleStatus(*job.ID, nil)
  2392  	must.NoError(t, err)
  2393  	must.Len(t, 1, status.TaskGroups[groupName].Events)
  2394  	scalingEvent := status.TaskGroups[groupName].Events[0]
  2395  	must.False(t, scalingEvent.Error)
  2396  	must.Eq(t, "need more instances", scalingEvent.Message)
  2397  	must.MapEq(t, map[string]interface{}{"meta": "data"}, scalingEvent.Meta)
  2398  	must.Positive(t, scalingEvent.Time)
  2399  	must.UUIDv4(t, *scalingEvent.EvalID)
  2400  	must.Eq(t, scalingResp.EvalID, *scalingEvent.EvalID)
  2401  	must.Eq(t, int64(origCount), scalingEvent.PreviousCount)
  2402  }
  2403  
  2404  func TestJobs_ScaleAction_Error(t *testing.T) {
  2405  	testutil.Parallel(t)
  2406  
  2407  	c, s := makeClient(t, nil, nil)
  2408  	defer s.Stop()
  2409  	jobs := c.Jobs()
  2410  
  2411  	id := "job-id/with\\troublesome:characters\n?&字"
  2412  	job := testJobWithScalingPolicy()
  2413  	job.ID = &id
  2414  	groupName := *job.TaskGroups[0].Name
  2415  	prevCount := *job.TaskGroups[0].Count
  2416  
  2417  	// Register the job
  2418  	regResp, wm, err := jobs.Register(job, nil)
  2419  	must.NoError(t, err)
  2420  	assertWriteMeta(t, wm)
  2421  
  2422  	// Perform scaling action
  2423  	scaleResp, wm, err := jobs.Scale(id, groupName, nil, "something bad happened", true,
  2424  		map[string]interface{}{
  2425  			"meta": "data",
  2426  		}, nil)
  2427  
  2428  	must.NoError(t, err)
  2429  	must.NotNil(t, scaleResp)
  2430  	must.Eq(t, "", scaleResp.EvalID)
  2431  	must.Zero(t, scaleResp.EvalCreateIndex)
  2432  	assertWriteMeta(t, wm)
  2433  
  2434  	// Query the job again
  2435  	resp, _, err := jobs.Info(*job.ID, nil)
  2436  	must.NoError(t, err)
  2437  	must.Eq(t, *resp.TaskGroups[0].Count, prevCount)
  2438  	must.Eq(t, regResp.JobModifyIndex, scaleResp.JobModifyIndex)
  2439  	must.Zero(t, scaleResp.EvalCreateIndex)
  2440  	must.Eq(t, "", scaleResp.EvalID)
  2441  
  2442  	status, _, err := jobs.ScaleStatus(*job.ID, nil)
  2443  	must.NoError(t, err)
  2444  	must.Len(t, 1, status.TaskGroups[groupName].Events)
  2445  	errEvent := status.TaskGroups[groupName].Events[0]
  2446  	must.True(t, errEvent.Error)
  2447  	must.Eq(t, "something bad happened", errEvent.Message)
  2448  	must.Eq(t, map[string]interface{}{"meta": "data"}, errEvent.Meta)
  2449  	must.Positive(t, errEvent.Time)
  2450  	must.Nil(t, errEvent.EvalID)
  2451  }
  2452  
  2453  func TestJobs_ScaleAction_Noop(t *testing.T) {
  2454  	testutil.Parallel(t)
  2455  
  2456  	c, s := makeClient(t, nil, nil)
  2457  	defer s.Stop()
  2458  	jobs := c.Jobs()
  2459  
  2460  	id := "job-id/with\\troublesome:characters\n?&字"
  2461  	job := testJobWithScalingPolicy()
  2462  	job.ID = &id
  2463  	groupName := *job.TaskGroups[0].Name
  2464  	prevCount := *job.TaskGroups[0].Count
  2465  
  2466  	// Register the job
  2467  	regResp, wm, err := jobs.Register(job, nil)
  2468  	must.NoError(t, err)
  2469  	assertWriteMeta(t, wm)
  2470  
  2471  	// Perform scaling action
  2472  	scaleResp, wm, err := jobs.Scale(id, groupName, nil, "no count, just informative",
  2473  		false, map[string]interface{}{
  2474  			"meta": "data",
  2475  		}, nil)
  2476  
  2477  	must.NoError(t, err)
  2478  	must.NotNil(t, scaleResp)
  2479  	must.Eq(t, "", scaleResp.EvalID)
  2480  	must.Zero(t, scaleResp.EvalCreateIndex)
  2481  	assertWriteMeta(t, wm)
  2482  
  2483  	// Query the job again
  2484  	resp, _, err := jobs.Info(*job.ID, nil)
  2485  	must.NoError(t, err)
  2486  	must.Eq(t, *resp.TaskGroups[0].Count, prevCount)
  2487  	must.Eq(t, regResp.JobModifyIndex, scaleResp.JobModifyIndex)
  2488  	must.Zero(t, scaleResp.EvalCreateIndex)
  2489  	must.Eq(t, "", scaleResp.EvalID)
  2490  
  2491  	status, _, err := jobs.ScaleStatus(*job.ID, nil)
  2492  	must.NoError(t, err)
  2493  	must.Len(t, 1, status.TaskGroups[groupName].Events)
  2494  	noopEvent := status.TaskGroups[groupName].Events[0]
  2495  	must.False(t, noopEvent.Error)
  2496  	must.Eq(t, "no count, just informative", noopEvent.Message)
  2497  	must.MapEq(t, map[string]interface{}{"meta": "data"}, noopEvent.Meta)
  2498  	must.Positive(t, noopEvent.Time)
  2499  	must.Nil(t, noopEvent.EvalID)
  2500  }
  2501  
  2502  // TestJobs_ScaleStatus tests the /scale status endpoint for task group count
  2503  func TestJobs_ScaleStatus(t *testing.T) {
  2504  	testutil.Parallel(t)
  2505  
  2506  	c, s := makeClient(t, nil, nil)
  2507  	defer s.Stop()
  2508  	jobs := c.Jobs()
  2509  
  2510  	// Trying to retrieve a status before it exists returns an error
  2511  	id := "job-id/with\\troublesome:characters\n?&字"
  2512  	_, _, err := jobs.ScaleStatus(id, nil)
  2513  	must.ErrorContains(t, err, "not found")
  2514  
  2515  	// Register the job
  2516  	job := testJob()
  2517  	job.ID = &id
  2518  	groupName := *job.TaskGroups[0].Name
  2519  	groupCount := *job.TaskGroups[0].Count
  2520  	_, wm, err := jobs.Register(job, nil)
  2521  	must.NoError(t, err)
  2522  	assertWriteMeta(t, wm)
  2523  
  2524  	// Query the scaling endpoint and verify success
  2525  	result, qm, err := jobs.ScaleStatus(id, nil)
  2526  	must.NoError(t, err)
  2527  	assertQueryMeta(t, qm)
  2528  
  2529  	// Check that the result is what we expect
  2530  	must.Eq(t, groupCount, result.TaskGroups[groupName].Desired)
  2531  }
  2532  
  2533  func TestJobs_Services(t *testing.T) {
  2534  	// TODO(jrasell) add tests once registration process is in place; a hedged sketch follows this function.
  2535  }
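
// The following is a hedged sketch of one direction the TODO above could
// take: attach a Nomad-provider service to the test job's task group and
// confirm the definition round-trips through Register and Info. The Service
// field names (Name, Provider) and the group-level Services slice are
// assumptions about this api package; verifying an actual runtime service
// registration would additionally need a running allocation, which this
// sketch does not attempt.
func TestJobs_Services_sketch(t *testing.T) {
	testutil.Parallel(t)

	c, s := makeClient(t, nil, nil)
	defer s.Stop()
	jobs := c.Jobs()

	// Add a group-level service to the standard test job.
	job := testJob()
	job.TaskGroups[0].Services = []*Service{{
		Name:     "sketch-service",
		Provider: "nomad",
	}}

	_, wm, err := jobs.Register(job, nil)
	must.NoError(t, err)
	assertWriteMeta(t, wm)

	// The service definition should survive the round trip.
	out, _, err := jobs.Info(*job.ID, nil)
	must.NoError(t, err)
	must.Len(t, 1, out.TaskGroups[0].Services)
	must.Eq(t, "sketch-service", out.TaskGroups[0].Services[0].Name)
}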
  2536  
  2537  // TestJobs_Parse asserts ParseHCL and ParseHCLOpts use the API to parse HCL.
  2538  func TestJobs_Parse(t *testing.T) {
  2539  	testutil.Parallel(t)
  2540  
  2541  	jobspec := `job "example" {}`
  2542  
  2543  	// Assert ParseHCL returns an error if Nomad is not running to ensure
  2544  	// that parsing is done server-side and not via the jobspec package.
  2545  	{
  2546  		c, err := NewClient(DefaultConfig())
  2547  		must.NoError(t, err)
  2548  
  2549  		_, err = c.Jobs().ParseHCL(jobspec, false)
  2550  		must.ErrorContains(t, err, "Put")
  2551  	}
  2552  
  2553  	c, s := makeClient(t, nil, nil)
  2554  	defer s.Stop()
  2555  
  2556  	// Test ParseHCL
  2557  	job1, err := c.Jobs().ParseHCL(jobspec, false)
  2558  	must.NoError(t, err)
  2559  	must.Eq(t, "example", *job1.Name)
  2560  	must.Nil(t, job1.Namespace)
  2561  
  2562  	job1Canonicalized, err := c.Jobs().ParseHCL(jobspec, true)
  2563  	must.NoError(t, err)
  2564  	must.Eq(t, "example", *job1Canonicalized.Name)
  2565  	must.Eq(t, "default", *job1Canonicalized.Namespace)
  2566  	must.NotEq(t, job1, job1Canonicalized)
  2567  
  2568  	// Test ParseHCLOpts
  2569  	req := &JobsParseRequest{
  2570  		JobHCL:       jobspec,
  2571  		HCLv1:        false,
  2572  		Canonicalize: false,
  2573  	}
  2574  
  2575  	job2, err := c.Jobs().ParseHCLOpts(req)
  2576  	must.NoError(t, err)
  2577  	must.Eq(t, job1, job2)
  2578  
  2579  	// Test ParseHCLOpts with Canonicalize=true
  2580  	req = &JobsParseRequest{
  2581  		JobHCL:       jobspec,
  2582  		HCLv1:        false,
  2583  		Canonicalize: true,
  2584  	}
  2585  	job2Canonicalized, err := c.Jobs().ParseHCLOpts(req)
  2586  	must.NoError(t, err)
  2587  	must.Eq(t, job1Canonicalized, job2Canonicalized)
  2588  
  2589  	// Test ParseHCLOpts with HCLv1=true
  2590  	req = &JobsParseRequest{
  2591  		JobHCL:       jobspec,
  2592  		HCLv1:        true,
  2593  		Canonicalize: false,
  2594  	}
  2595  
  2596  	job3, err := c.Jobs().ParseHCLOpts(req)
  2597  	must.NoError(t, err)
  2598  	must.Eq(t, job1, job3)
  2599  
  2600  	// Test ParseHCLOpts with HCLv1=true and Canonicalize=true
  2601  	req = &JobsParseRequest{
  2602  		JobHCL:       jobspec,
  2603  		HCLv1:        true,
  2604  		Canonicalize: true,
  2605  	}
  2606  	job3Canonicalized, err := c.Jobs().ParseHCLOpts(req)
  2607  	must.NoError(t, err)
  2608  	must.Eq(t, job1Canonicalized, job3Canonicalized)
  2609  }