github.com/hernad/nomad@v1.6.112/e2e/consultemplate/consultemplate.go (about)

     1  // Copyright (c) HashiCorp, Inc.
     2  // SPDX-License-Identifier: MPL-2.0
     3  
     4  package consultemplate
     5  
     6  import (
     7  	"fmt"
     8  	"os"
     9  	"strings"
    10  	"time"
    11  
    12  	capi "github.com/hashicorp/consul/api"
    13  	api "github.com/hernad/nomad/api"
    14  	"github.com/hernad/nomad/e2e/e2eutil"
    15  	"github.com/hernad/nomad/e2e/framework"
    16  	"github.com/hernad/nomad/helper/uuid"
    17  	"github.com/hernad/nomad/jobspec"
    18  	"github.com/hernad/nomad/nomad/structs"
    19  	"github.com/hernad/nomad/testutil"
    20  )
    21  
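         // ns is the namespace passed to the job status helpers; the empty string
         // resolves to the "default" namespace.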
    22  const ns = ""
    23  
    24  type ConsulTemplateTest struct {
    25  	framework.TC
    26  	jobIDs     []string
    27  	consulKeys []string
    28  
     29  	// namespaceIDs tracks the created namespaces for removal after test
    30  	// completion.
    31  	namespaceIDs []string
    32  
    33  	// namespacedJobIDs tracks any non-default namespaced jobs for removal
    34  	// after test completion.
    35  	namespacedJobIDs map[string][]string
    36  }
    37  
    38  func init() {
    39  	framework.AddSuites(&framework.TestSuite{
    40  		Component:   "ConsulTemplate",
    41  		CanRunLocal: true,
    42  		Consul:      true,
    43  		Cases: []framework.TestCase{
    44  			&ConsulTemplateTest{
    45  				namespacedJobIDs: make(map[string][]string),
    46  			},
    47  		},
    48  	})
    49  }
    50  
    51  func (tc *ConsulTemplateTest) BeforeAll(f *framework.F) {
    52  	e2eutil.WaitForLeader(f.T(), tc.Nomad())
    53  	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)
    54  }
    55  
    56  func (tc *ConsulTemplateTest) AfterEach(f *framework.F) {
    57  	if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
    58  		return
    59  	}
    60  
    61  	for _, id := range tc.jobIDs {
    62  		err := e2eutil.StopJob(id, "-purge")
    63  		f.Assert().NoError(err, "could not clean up job", id)
    64  	}
    65  	tc.jobIDs = []string{}
    66  
    67  	for _, key := range tc.consulKeys {
    68  		_, err := tc.Consul().KV().Delete(key, nil)
    69  		f.Assert().NoError(err, "could not clean up consul key", key)
    70  	}
    71  	tc.consulKeys = []string{}
    72  
    73  	for namespace, jobIDs := range tc.namespacedJobIDs {
    74  		for _, jobID := range jobIDs {
    75  			err := e2eutil.StopJob(jobID, "-purge", "-namespace", namespace)
    76  			f.Assert().NoError(err)
    77  		}
    78  	}
    79  	tc.namespacedJobIDs = make(map[string][]string)
    80  
    81  	for _, ns := range tc.namespaceIDs {
    82  		_, err := e2eutil.Command("nomad", "namespace", "delete", ns)
    83  		f.Assert().NoError(err)
    84  	}
    85  	tc.namespaceIDs = []string{}
    86  
    87  	_, err := e2eutil.Command("nomad", "system", "gc")
    88  	f.NoError(err)
    89  }
    90  
    91  // TestTemplateUpdateTriggers exercises consul-template integration, verifying that:
    92  // - missing keys block allocations from starting
    93  // - key updates trigger re-render
    94  // - service updates trigger re-render
     95  // - 'noop' vs 'restart' configuration
    96  func (tc *ConsulTemplateTest) TestTemplateUpdateTriggers(f *framework.F) {
    97  
    98  	wc := &e2eutil.WaitConfig{}
    99  	interval, retries := wc.OrDefault()
   100  
   101  	key := "consultemplate-" + uuid.Generate()[:8]
   102  	jobID := key
   103  
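         	// The downstream tasks will watch a Consul key unique to this test run.
         	// Once that key is written (with value "foo" later in the test), the
         	// rendered kv.yml is expected to read:
         	//
         	//	---
         	//	key: foo
         	//	job: templating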
   104  	replacement := fmt.Sprintf(`---
   105  key: {{ key "%s" }}
   106  job: {{ env "NOMAD_JOB_NAME" }}
   107  `, key)
   108  
   109  	// Ensure consul key does not exist
   110  	_, err := tc.Consul().KV().Delete(key, nil)
   111  	f.NoError(err)
   112  
   113  	// Parse job so we can replace the template block with isolated keys
   114  	job, err := jobspec.ParseFile("consultemplate/input/templating.nomad")
   115  	f.NoError(err)
   116  	job.ID = &jobID
   117  
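         	// Swap the embedded kv template in both downstream groups for the
         	// isolated-key version above. Templates[1] is assumed to be the kv.yml
         	// template in each group; the other template renders services.conf from
         	// the upstream service registrations.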
   118  	job.TaskGroups[0].Tasks[0].Templates[1].EmbeddedTmpl = &replacement
   119  	job.TaskGroups[1].Tasks[0].Templates[1].EmbeddedTmpl = &replacement
   120  
   121  	tc.jobIDs = append(tc.jobIDs, jobID)
   122  
   123  	_, _, err = tc.Nomad().Jobs().Register(job, nil)
   124  	f.NoError(err, "could not register job")
   125  
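         	// Only the upstream group should reach running: both downstream groups
         	// stay pending because their kv template references a Consul key that
         	// does not exist yet.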
   126  	expected := map[string]string{
   127  		"upstream":          "running",
   128  		"exec_downstream":   "pending",
   129  		"docker_downstream": "pending"}
   130  	f.NoError(waitForAllocStatusByGroup(jobID, ns, expected, nil))
   131  
   132  	// We won't reschedule any of these allocs, so we can cache these IDs for later
   133  	downstreams := map[string]string{} // alloc ID -> group name
   134  	allocs, err := e2eutil.AllocsForJob(jobID, ns)
   135  	f.NoError(err)
   136  	for _, alloc := range allocs {
   137  		group := alloc["Task Group"]
   138  		if group == "docker_downstream" || group == "exec_downstream" {
   139  			downstreams[alloc["ID"]] = group
   140  		}
   141  	}
   142  
   143  	// note: checking pending above doesn't tell us whether we've tried to render
   144  	// the template yet, so we still need to poll for the template event
   145  	for allocID, group := range downstreams {
   146  		var checkErr error
   147  		testutil.WaitForResultRetries(retries, func() (bool, error) {
   148  			time.Sleep(interval)
   149  			out, err := e2eutil.Command("nomad", "alloc", "status", allocID)
   150  			f.NoError(err, "could not get allocation status")
   151  			return strings.Contains(out, "Missing: kv.block"),
   152  				fmt.Errorf("expected %q to be blocked on Consul key", group)
   153  		}, func(e error) {
   154  			checkErr = e
   155  		})
   156  		f.NoError(checkErr)
   157  	}
   158  
   159  	// Write our key to Consul
   160  	_, err = tc.Consul().KV().Put(&capi.KVPair{Key: key, Value: []byte("foo")}, nil)
   161  	f.NoError(err)
   162  	tc.consulKeys = append(tc.consulKeys, key)
   163  
   164  	// template will render, allowing downstream allocs to run
   165  	expected = map[string]string{
   166  		"upstream":          "running",
   167  		"exec_downstream":   "running",
   168  		"docker_downstream": "running"}
   169  	f.NoError(waitForAllocStatusByGroup(jobID, ns, expected, &e2eutil.WaitConfig{
   170  		Interval: time.Millisecond * 300,
   171  		Retries:  100,
   172  	}))
   173  
   174  	// verify we've rendered the templates
   175  	for allocID := range downstreams {
   176  		f.NoError(waitForTemplateRender(allocID, "task/local/kv.yml",
   177  			func(out string) bool {
   178  				return strings.TrimSpace(out) == "---\nkey: foo\njob: templating"
   179  			}, nil), "expected consul key to be rendered")
   180  
   181  		f.NoError(waitForTemplateRender(allocID, "task/local/services.conf",
   182  			func(out string) bool {
   183  				confLines := strings.Split(strings.TrimSpace(out), "\n")
   184  				servers := 0
   185  				for _, line := range confLines {
   186  					if strings.HasPrefix(line, "server upstream-service ") {
   187  						servers++
   188  					}
   189  				}
   190  				return servers == 2
   191  			}, nil), "expected 2 upstream servers")
   192  	}
   193  
   194  	// Update our key in Consul
   195  	_, err = tc.Consul().KV().Put(&capi.KVPair{Key: key, Value: []byte("bar")}, nil)
   196  	f.NoError(err)
   197  
   198  	// Wait for restart
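         	// The kv template is the 'restart' side of the test (presumably via
         	// change_mode "restart" in templating.nomad), so the key update should
         	// restart each downstream task exactly once.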
   199  	for allocID, group := range downstreams {
   200  		var checkErr error
   201  		testutil.WaitForResultRetries(retries, func() (bool, error) {
   202  			time.Sleep(interval)
   203  			out, err := e2eutil.Command("nomad", "alloc", "status", allocID)
   204  			f.NoError(err, "could not get allocation status")
   205  
   206  			section, err := e2eutil.GetSection(out, "Task Events:")
   207  			f.NoError(err, out)
   208  
   209  			restarts, err := e2eutil.GetField(section, "Total Restarts")
   210  			f.NoError(err)
   211  			return restarts == "1",
   212  				fmt.Errorf("expected 1 restart for %q but found %s", group, restarts)
   213  		}, func(e error) {
   214  			checkErr = e
   215  		})
   216  		f.NoError(checkErr)
   217  
   218  		// verify we've re-rendered the template
   219  		f.NoError(waitForTemplateRender(allocID, "task/local/kv.yml",
   220  			func(out string) bool {
   221  				return strings.TrimSpace(out) == "---\nkey: bar\njob: templating"
   222  			}, nil), "expected updated consul key")
   223  	}
   224  
   225  	// increase the count for upstreams
   226  	count := 3
   227  	job.TaskGroups[2].Count = &count
   228  	_, _, err = tc.Nomad().Jobs().Register(job, nil)
   229  	f.NoError(err, "could not register job")
   230  
   231  	// wait for re-rendering
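         	// The services template is the 'noop' side of the test: scaling the
         	// upstream group should re-render services.conf with a third server
         	// entry without restarting the downstream tasks (checked below).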
   232  	for allocID := range downstreams {
   233  		f.NoError(waitForTemplateRender(allocID, "task/local/services.conf",
   234  			func(out string) bool {
   235  				confLines := strings.Split(strings.TrimSpace(out), "\n")
   236  				servers := 0
   237  				for _, line := range confLines {
   238  					if strings.HasPrefix(line, "server upstream-service ") {
   239  						servers++
   240  					}
   241  				}
   242  				return servers == 3
   243  			}, nil), "expected 3 upstream servers")
   244  
   245  		// verify noop was honored: no additional restarts
   246  		out, err := e2eutil.Command("nomad", "alloc", "status", allocID)
   247  		f.NoError(err, "could not get allocation status")
   248  
   249  		section, err := e2eutil.GetSection(out, "Task Events:")
   250  		f.NoError(err, out)
   251  
   252  		restarts, err := e2eutil.GetField(section, "Total Restarts")
   253  		f.NoError(err)
   254  		f.Equal("1", restarts, "expected no new restarts for group")
   255  	}
   256  }
   257  
   258  // TestTemplatePathInterpolation_Ok asserts that NOMAD_*_DIR variables are
   259  // properly interpolated into template source and destination paths without
    260  // being treated as attempts to escape the file sandbox.
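         //
         // For reference, a destination such as "${NOMAD_SECRETS_DIR}/foo/dst" in the
         // input job is expected to land at the alloc filesystem path
         // "task/secrets/foo/dst" checked below (illustrative mapping; see
         // input/template_paths.nomad for the actual jobspec).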
   261  func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_Ok(f *framework.F) {
   262  	jobID := "template-paths-" + uuid.Generate()[:8]
   263  	tc.jobIDs = append(tc.jobIDs, jobID)
   264  
   265  	allocStubs := e2eutil.RegisterAndWaitForAllocs(
   266  		f.T(), tc.Nomad(), "consultemplate/input/template_paths.nomad", jobID, "")
   267  	f.Len(allocStubs, 1)
   268  	allocID := allocStubs[0].ID
   269  
   270  	e2eutil.WaitForAllocRunning(f.T(), tc.Nomad(), allocID)
   271  
   272  	f.NoError(waitForTemplateRender(allocID, "task/secrets/foo/dst",
   273  		func(out string) bool {
   274  			return len(out) > 0
   275  		}, nil), "expected file to have contents")
   276  
   277  	f.NoError(waitForTemplateRender(allocID, "alloc/shared.txt",
   278  		func(out string) bool {
   279  			return len(out) > 0
   280  		}, nil), "expected shared-alloc-dir file to have contents")
   281  }
   282  
   283  // TestTemplatePathInterpolation_Bad asserts that template.source paths are not
   284  // allowed to escape the sandbox directory tree by default.
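         //
         // The input job is expected to declare a template source path that resolves
         // outside the allocation directory, which the client rejects with a
         // "template source path escapes alloc directory" task event (asserted below).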
   285  func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_Bad(f *framework.F) {
   286  	wc := &e2eutil.WaitConfig{}
   287  	interval, retries := wc.OrDefault()
   288  
   289  	jobID := "bad-template-paths-" + uuid.Generate()[:8]
   290  	tc.jobIDs = append(tc.jobIDs, jobID)
   291  
   292  	allocStubs := e2eutil.RegisterAndWaitForAllocs(
   293  		f.T(), tc.Nomad(), "consultemplate/input/bad_template_paths.nomad", jobID, "")
   294  	f.Len(allocStubs, 1)
   295  	allocID := allocStubs[0].ID
   296  
   297  	// Wait for alloc to fail
   298  	var err error
   299  	var alloc *api.Allocation
   300  	testutil.WaitForResultRetries(retries, func() (bool, error) {
   301  		time.Sleep(interval)
   302  		alloc, _, err = tc.Nomad().Allocations().Info(allocID, nil)
   303  		if err != nil {
   304  			return false, err
   305  		}
   306  
   307  		return alloc.ClientStatus == structs.AllocClientStatusFailed, fmt.Errorf("expected status failed, but was: %s", alloc.ClientStatus)
   308  	}, func(err error) {
   309  		f.NoError(err, "failed to wait on alloc")
   310  	})
   311  
   312  	// Assert the "source escapes" error occurred to prevent false
   313  	// positives.
   314  	found := false
   315  	for _, event := range alloc.TaskStates["task"].Events {
   316  		if strings.Contains(event.DisplayMessage, "template source path escapes alloc directory") {
   317  			found = true
   318  			break
   319  		}
   320  	}
   321  	f.True(found, "alloc failed but NOT due to expected source path escape error")
   322  }
   323  
   324  // TestTemplatePathInterpolation_SharedAllocDir asserts that NOMAD_ALLOC_DIR
   325  // is supported as a destination for artifact and template blocks, and
   326  // that it is properly interpolated for task drivers with varying
    327  // filesystem isolation.
   328  func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_SharedAllocDir(f *framework.F) {
   329  	jobID := "template-shared-alloc-" + uuid.Generate()[:8]
   330  	tc.jobIDs = append(tc.jobIDs, jobID)
   331  
   332  	allocStubs := e2eutil.RegisterAndWaitForAllocs(
   333  		f.T(), tc.Nomad(), "consultemplate/input/template_shared_alloc.nomad", jobID, "")
   334  	f.Len(allocStubs, 1)
   335  	allocID := allocStubs[0].ID
   336  
   337  	e2eutil.WaitForAllocRunning(f.T(), tc.Nomad(), allocID)
   338  
   339  	for _, task := range []string{"docker", "exec", "raw_exec"} {
   340  
   341  		// tests that we can render templates into the shared alloc directory
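         		// raw_exec runs without filesystem isolation, so NOMAD_ALLOC_DIR
         		// interpolates to the host path of the shared alloc dir, whereas
         		// the exec and docker drivers see it as "/alloc" inside their
         		// isolated task filesystems.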
   342  		f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/raw_exec.env",
   343  			func(out string) bool {
   344  				return len(out) > 0 && strings.TrimSpace(out) != "/alloc"
   345  			}, nil), "expected raw_exec.env to not be '/alloc'")
   346  
   347  		f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/exec.env",
   348  			func(out string) bool {
   349  				return strings.TrimSpace(out) == "/alloc"
   350  			}, nil), "expected shared exec.env to contain '/alloc'")
   351  
   352  		f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/docker.env",
   353  			func(out string) bool {
   354  				return strings.TrimSpace(out) == "/alloc"
   355  			}, nil), "expected shared docker.env to contain '/alloc'")
   356  
   357  		// test that we can fetch artifacts into the shared alloc directory
   358  		for _, a := range []string{"google1.html", "google2.html", "google3.html"} {
   359  			f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/"+a,
   360  				func(out string) bool {
   361  					return len(out) > 0
   362  				}, nil), "expected artifact in alloc dir")
   363  		}
   364  
   365  		// test that we can load environment variables rendered with templates using interpolated paths
   366  		out, err := e2eutil.Command("nomad", "alloc", "exec", "-task", task, allocID, "sh", "-c", "env")
   367  		f.NoError(err)
   368  		f.Contains(out, "HELLO_FROM=raw_exec")
   369  	}
   370  }
   371  
    372  // TestConsulTemplate_NomadServiceLookups tests consul-template's Nomad service
   373  // lookup functionality. It runs a job which registers two services, then
   374  // another which performs both a list and read template function lookup against
   375  // registered services.
   376  func (tc *ConsulTemplateTest) TestConsulTemplate_NomadServiceLookups(f *framework.F) {
   377  
   378  	// Set up our base job that will be used in various manners.
   379  	serviceJob, err := jobspec.ParseFile("consultemplate/input/nomad_provider_service.nomad")
   380  	f.NoError(err)
   381  	serviceJobID := "test-consul-template-nomad-lookups" + uuid.Generate()[0:8]
   382  	serviceJob.ID = &serviceJobID
   383  
   384  	_, _, err = tc.Nomad().Jobs().Register(serviceJob, nil)
   385  	f.NoError(err)
   386  	tc.jobIDs = append(tc.jobIDs, serviceJobID)
   387  	f.NoError(e2eutil.WaitForAllocStatusExpected(serviceJobID, "default", []string{"running"}), "job should be running")
   388  
    389  	// Pull the allocation ID for the job; we use this to ensure it is found
    390  	// in the rendered template later on.
   391  	serviceJobAllocs, err := e2eutil.AllocsForJob(serviceJobID, "default")
   392  	f.NoError(err)
   393  	f.Len(serviceJobAllocs, 1)
   394  	serviceAllocID := serviceJobAllocs[0]["ID"]
   395  
    396  	// Create a non-default namespace.
   397  	_, err = e2eutil.Command("nomad", "namespace", "apply", "platform")
   398  	f.NoError(err)
    399  	tc.namespaceIDs = append(tc.namespaceIDs, "platform")
   400  
   401  	// Register a job which includes services destined for the Nomad provider
   402  	// into the platform namespace. This is used to ensure consul-template
   403  	// lookups stay bound to the allocation namespace.
   404  	diffNamespaceServiceJobID := "test-consul-template-nomad-lookups" + uuid.Generate()[0:8]
   405  	f.NoError(e2eutil.Register(diffNamespaceServiceJobID, "consultemplate/input/nomad_provider_service_ns.nomad"))
   406  	tc.namespacedJobIDs["platform"] = append(tc.namespacedJobIDs["platform"], diffNamespaceServiceJobID)
   407  	f.NoError(e2eutil.WaitForAllocStatusExpected(diffNamespaceServiceJobID, "platform", []string{"running"}), "job should be running")
   408  
   409  	// Register a job which includes consul-template function performing Nomad
   410  	// service listing and reads.
   411  	serviceLookupJobID := "test-consul-template-nomad-lookups" + uuid.Generate()[0:8]
   412  	f.NoError(e2eutil.Register(serviceLookupJobID, "consultemplate/input/nomad_provider_service_lookup.nomad"))
   413  	tc.jobIDs = append(tc.jobIDs, serviceLookupJobID)
   414  	f.NoError(e2eutil.WaitForAllocStatusExpected(serviceLookupJobID, "default", []string{"running"}), "job should be running")
   415  
   416  	// Find the allocation ID for the job which contains templates, so we can
   417  	// perform filesystem actions.
   418  	serviceLookupJobAllocs, err := e2eutil.AllocsForJob(serviceLookupJobID, "default")
   419  	f.NoError(err)
   420  	f.Len(serviceLookupJobAllocs, 1)
   421  	serviceLookupAllocID := serviceLookupJobAllocs[0]["ID"]
   422  
   423  	// Ensure the listing (nomadServices) template function has found all
   424  	// services within the default namespace.
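         	// The services.conf template presumably iterates over nomadServices,
         	// emitting one "service <name> [<tags>]" line per registration; services
         	// registered in the "platform" namespace must not appear.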
   425  	err = waitForTaskFile(serviceLookupAllocID, "test", "${NOMAD_TASK_DIR}/services.conf",
   426  		func(out string) bool {
   427  			if !strings.Contains(out, "service default-nomad-provider-service-primary [bar foo]") {
   428  				return false
   429  			}
   430  			if !strings.Contains(out, "service default-nomad-provider-service-secondary [baz buz]") {
   431  				return false
   432  			}
   433  			return !strings.Contains(out, "service platform-nomad-provider-service-secondary [baz buz]")
   434  		}, nil)
   435  	f.NoError(err)
   436  
   437  	// Ensure the direct service lookup has found the entry we expect.
   438  	err = waitForTaskFile(serviceLookupAllocID, "test", "${NOMAD_TASK_DIR}/service.conf",
   439  		func(out string) bool {
   440  			expected := fmt.Sprintf("service default-nomad-provider-service-primary [bar foo] dc1 %s", serviceAllocID)
   441  			return strings.Contains(out, expected)
   442  		}, nil)
   443  	f.NoError(err)
   444  
   445  	// Scale the default namespaced service job in order to change the expected
   446  	// number of entries.
   447  	count := 3
   448  	serviceJob.TaskGroups[0].Count = &count
   449  	_, _, err = tc.Nomad().Jobs().Register(serviceJob, nil)
   450  	f.NoError(err)
   451  
    452  	// Pull the allocation IDs for the job; we use these to ensure they are
    453  	// found in the rendered template later on. Wrap this in an Eventually block
    454  	// due to the eventual consistency of the service registration process.
   455  	f.Eventually(func() bool {
   456  		serviceJobAllocs, err = e2eutil.AllocsForJob(serviceJobID, "default")
   457  		if err != nil {
   458  			return false
   459  		}
   460  		return len(serviceJobAllocs) == 3
   461  	}, 10*time.Second, 200*time.Millisecond, "unexpected number of allocs found")
   462  
   463  	// Track the expected entries, including the allocID to make this test
   464  	// actually valuable.
   465  	var expectedEntries []string
   466  	for _, allocs := range serviceJobAllocs {
   467  		e := fmt.Sprintf("service default-nomad-provider-service-primary [bar foo] dc1 %s", allocs["ID"])
   468  		expectedEntries = append(expectedEntries, e)
   469  	}
   470  
   471  	// Ensure the direct service lookup has the new entries we expect.
   472  	err = waitForTaskFile(serviceLookupAllocID, "test", "${NOMAD_TASK_DIR}/service.conf",
   473  		func(out string) bool {
   474  			for _, entry := range expectedEntries {
   475  				if !strings.Contains(out, entry) {
   476  					return false
   477  				}
   478  			}
   479  			return true
   480  		}, nil)
   481  	f.NoError(err)
   482  }
   483  
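         // waitForTaskFile polls a file via 'nomad alloc exec ... cat' inside the
         // given task until the test predicate passes. Unlike waitForTemplateRender
         // below, it reads through the task's own filesystem, so shell-expanded paths
         // such as ${NOMAD_ALLOC_DIR} can be passed directly.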
   484  func waitForTaskFile(allocID, task, path string, test func(out string) bool, wc *e2eutil.WaitConfig) error {
   485  	var err error
   486  	var out string
   487  	interval, retries := wc.OrDefault()
   488  
   489  	testutil.WaitForResultRetries(retries, func() (bool, error) {
   490  		time.Sleep(interval)
   491  		out, err = e2eutil.Command("nomad", "alloc", "exec", "-task", task, allocID, "sh", "-c", "cat "+path)
   492  		if err != nil {
   493  			return false, fmt.Errorf("could not cat file %q from task %q in allocation %q: %v",
   494  				path, task, allocID, err)
   495  		}
   496  		return test(out), nil
   497  	}, func(e error) {
   498  		err = fmt.Errorf("test for file content failed: got %#v\nerror: %v", out, e)
   499  	})
   500  	return err
   501  }
   502  
    503  // waitForTemplateRender is a helper that fetches a file via 'nomad alloc fs'
    504  // and tests its contents against the passed predicate.
   505  func waitForTemplateRender(allocID, path string, test func(string) bool, wc *e2eutil.WaitConfig) error {
   506  	var err error
   507  	var out string
   508  	interval, retries := wc.OrDefault()
   509  
   510  	testutil.WaitForResultRetries(retries, func() (bool, error) {
   511  		time.Sleep(interval)
   512  		out, err = e2eutil.Command("nomad", "alloc", "fs", allocID, path)
   513  		if err != nil {
   514  			return false, fmt.Errorf("could not get file %q from allocation %q: %v",
   515  				path, allocID, err)
   516  		}
   517  		return test(out), nil
   518  	}, func(e error) {
   519  		err = fmt.Errorf("test for file content failed: got %#v\nerror: %v", out, e)
   520  	})
   521  	return err
   522  }
   523  
   524  // waitForAllocStatusByGroup is similar to WaitForAllocStatus but maps
    525  // specific task group names to statuses without having to deal with specific counts.
   526  func waitForAllocStatusByGroup(jobID, ns string, expected map[string]string, wc *e2eutil.WaitConfig) error {
   527  	var got []map[string]string
   528  	var err error
   529  	interval, retries := wc.OrDefault()
   530  	testutil.WaitForResultRetries(retries, func() (bool, error) {
   531  		time.Sleep(interval)
   532  		got, err = e2eutil.AllocsForJob(jobID, ns)
   533  		if err != nil {
   534  			return false, err
   535  		}
   536  		for _, row := range got {
   537  			group := row["Task Group"]
   538  			expectedStatus := expected[group]
   539  			gotStatus := row["Status"]
   540  			if expectedStatus != gotStatus {
   541  				return false, fmt.Errorf("expected %q to be %q, got %q",
   542  					group, expectedStatus, gotStatus)
   543  			}
   544  		}
   545  		err = nil
   546  		return true, nil
   547  	}, func(e error) {
   548  		err = fmt.Errorf("alloc status check failed: got %#v\nerror: %v", got, e)
   549  	})
   550  	return err
   551  }