github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/e2e/consultemplate/consultemplate.go

package consultemplate

import (
	"fmt"
	"os"
	"strings"
	"time"

	capi "github.com/hashicorp/consul/api"
	api "github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/jobspec"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
)

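// ns is the namespace passed to the e2eutil helpers for most jobs in this
// suite; the empty string selects Nomad's "default" namespace.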
const ns = ""

type ConsulTemplateTest struct {
	framework.TC
	jobIDs     []string
	consulKeys []string

	// namespaceIDs tracks created namespaces for removal after test
	// completion.
	namespaceIDs []string

	// namespacedJobIDs tracks any non-default namespaced jobs for removal
	// after test completion.
	namespacedJobIDs map[string][]string
}

func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "ConsulTemplate",
		CanRunLocal: true,
		Consul:      true,
		Cases: []framework.TestCase{
			&ConsulTemplateTest{
				namespacedJobIDs: make(map[string][]string),
			},
		},
	})
}

func (tc *ConsulTemplateTest) BeforeAll(f *framework.F) {
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)
}

func (tc *ConsulTemplateTest) AfterEach(f *framework.F) {
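	// Setting NOMAD_TEST_SKIPCLEANUP=1 in the environment leaves jobs, Consul
	// keys, and namespaces in place so a failed run can be inspected by hand.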
	if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
		return
	}

	for _, id := range tc.jobIDs {
		err := e2eutil.StopJob(id, "-purge")
		f.Assert().NoError(err, "could not clean up job", id)
	}
	tc.jobIDs = []string{}

	for _, key := range tc.consulKeys {
		_, err := tc.Consul().KV().Delete(key, nil)
		f.Assert().NoError(err, "could not clean up consul key", key)
	}
	tc.consulKeys = []string{}

	for namespace, jobIDs := range tc.namespacedJobIDs {
		for _, jobID := range jobIDs {
			err := e2eutil.StopJob(jobID, "-purge", "-namespace", namespace)
			f.Assert().NoError(err)
		}
	}
	tc.namespacedJobIDs = make(map[string][]string)

	for _, ns := range tc.namespaceIDs {
		_, err := e2eutil.Command("nomad", "namespace", "delete", ns)
		f.Assert().NoError(err)
	}
	tc.namespaceIDs = []string{}

	_, err := e2eutil.Command("nomad", "system", "gc")
	f.NoError(err)
}

// TestTemplateUpdateTriggers exercises consul-template integration, verifying that:
// - missing keys block allocations from starting
// - key updates trigger re-render
// - service updates trigger re-render
// - 'noop' vs 'restart' change modes
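//
// A sketch of the template stanza shape being exercised (the real jobspec is
// input/templating.nomad, not shown here, so the exact fields are assumptions):
//
//	template {
//	  data        = "..."             // replaced below via EmbeddedTmpl
//	  destination = "local/kv.yml"
//	  change_mode = "restart"         // the services.conf template uses "noop"
//	}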
func (tc *ConsulTemplateTest) TestTemplateUpdateTriggers(f *framework.F) {

	wc := &e2eutil.WaitConfig{}
	interval, retries := wc.OrDefault()

	key := "consultemplate-" + uuid.Generate()[:8]
	jobID := key

	replacement := fmt.Sprintf(`---
key: {{ key "%s" }}
job: {{ env "NOMAD_JOB_NAME" }}
`, key)
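
	// With the Consul key set to "foo" and the job named "templating", the
	// template above renders as:
	//
	//	---
	//	key: foo
	//	job: templating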

	// Ensure the Consul key does not exist
	_, err := tc.Consul().KV().Delete(key, nil)
	f.NoError(err)

	// Parse the job so we can replace the template stanza with isolated keys
	job, err := jobspec.ParseFile("consultemplate/input/templating.nomad")
	f.NoError(err)
	job.ID = &jobID

	job.TaskGroups[0].Tasks[0].Templates[1].EmbeddedTmpl = &replacement
	job.TaskGroups[1].Tasks[0].Templates[1].EmbeddedTmpl = &replacement

	tc.jobIDs = append(tc.jobIDs, jobID)

	_, _, err = tc.Nomad().Jobs().Register(job, nil)
	f.NoError(err, "could not register job")

	expected := map[string]string{
		"upstream":          "running",
		"exec_downstream":   "pending",
		"docker_downstream": "pending"}
	f.NoError(waitForAllocStatusByGroup(jobID, ns, expected, nil))

	// We won't reschedule any of these allocs, so we can cache their IDs for later
	downstreams := map[string]string{} // alloc ID -> group name
	allocs, err := e2eutil.AllocsForJob(jobID, ns)
	f.NoError(err)
	for _, alloc := range allocs {
		group := alloc["Task Group"]
		if group == "docker_downstream" || group == "exec_downstream" {
			downstreams[alloc["ID"]] = group
		}
	}

	// Note: checking for pending above doesn't tell us whether we've tried to
	// render the template yet, so we still need to poll for the template event
	for allocID, group := range downstreams {
		var checkErr error
		testutil.WaitForResultRetries(retries, func() (bool, error) {
			time.Sleep(interval)
			out, err := e2eutil.Command("nomad", "alloc", "status", allocID)
			f.NoError(err, "could not get allocation status")
			return strings.Contains(out, "Missing: kv.block"),
				fmt.Errorf("expected %q to be blocked on Consul key", group)
		}, func(e error) {
			checkErr = e
		})
		f.NoError(checkErr)
	}

	// Write our key to Consul
	_, err = tc.Consul().KV().Put(&capi.KVPair{Key: key, Value: []byte("foo")}, nil)
	f.NoError(err)
	tc.consulKeys = append(tc.consulKeys, key)

	// The template will render, allowing the downstream allocs to run
	expected = map[string]string{
		"upstream":          "running",
		"exec_downstream":   "running",
		"docker_downstream": "running"}
	f.NoError(waitForAllocStatusByGroup(jobID, ns, expected, &e2eutil.WaitConfig{
		Interval: time.Millisecond * 300,
		Retries:  100,
	}))

	// Verify we've rendered the templates
	for allocID := range downstreams {
		f.NoError(waitForTemplateRender(allocID, "task/local/kv.yml",
			func(out string) bool {
				return strings.TrimSpace(out) == "---\nkey: foo\njob: templating"
			}, nil), "expected consul key to be rendered")

		f.NoError(waitForTemplateRender(allocID, "task/local/services.conf",
			func(out string) bool {
				confLines := strings.Split(strings.TrimSpace(out), "\n")
				servers := 0
				for _, line := range confLines {
					if strings.HasPrefix(line, "server upstream-service ") {
						servers++
					}
				}
				return servers == 2
			}, nil), "expected 2 upstream servers")
	}

	// Update our key in Consul
	_, err = tc.Consul().KV().Put(&capi.KVPair{Key: key, Value: []byte("bar")}, nil)
	f.NoError(err)

	// Wait for the restart triggered by the re-render
	for allocID, group := range downstreams {
		var checkErr error
		testutil.WaitForResultRetries(retries, func() (bool, error) {
			time.Sleep(interval)
			out, err := e2eutil.Command("nomad", "alloc", "status", allocID)
			f.NoError(err, "could not get allocation status")

			section, err := e2eutil.GetSection(out, "Task Events:")
			f.NoError(err, out)

			restarts, err := e2eutil.GetField(section, "Total Restarts")
			f.NoError(err)
			return restarts == "1",
				fmt.Errorf("expected 1 restart for %q but found %s", group, restarts)
		}, func(e error) {
			checkErr = e
		})
		f.NoError(checkErr)

		// Verify we've re-rendered the template
		f.NoError(waitForTemplateRender(allocID, "task/local/kv.yml",
			func(out string) bool {
				return strings.TrimSpace(out) == "---\nkey: bar\njob: templating"
			}, nil), "expected updated consul key")
	}

	// Increase the count for the upstream group
	count := 3
	job.TaskGroups[2].Count = &count
	_, _, err = tc.Nomad().Jobs().Register(job, nil)
	f.NoError(err, "could not register job")

	// Wait for re-rendering
	for allocID := range downstreams {
		f.NoError(waitForTemplateRender(allocID, "task/local/services.conf",
			func(out string) bool {
				confLines := strings.Split(strings.TrimSpace(out), "\n")
				servers := 0
				for _, line := range confLines {
					if strings.HasPrefix(line, "server upstream-service ") {
						servers++
					}
				}
				return servers == 3
			}, nil), "expected 3 upstream servers")

		// Verify 'noop' was honored: no additional restarts
		out, err := e2eutil.Command("nomad", "alloc", "status", allocID)
		f.NoError(err, "could not get allocation status")

		section, err := e2eutil.GetSection(out, "Task Events:")
		f.NoError(err, out)

		restarts, err := e2eutil.GetField(section, "Total Restarts")
		f.NoError(err)
		f.Equal("1", restarts, "expected no new restarts for group")
	}
}

// TestTemplatePathInterpolation_Ok asserts that NOMAD_*_DIR variables are
// properly interpolated into template source and destination paths without
// being treated as escaping.
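//
// A sketch of the kind of destination this exercises (the real jobspec is
// input/template_paths.nomad, so the exact paths here are assumptions):
//
//	template {
//	  destination = "${NOMAD_SECRETS_DIR}/foo/dst"
//	}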
func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_Ok(f *framework.F) {
	jobID := "template-paths-" + uuid.Generate()[:8]
	tc.jobIDs = append(tc.jobIDs, jobID)

	allocStubs := e2eutil.RegisterAndWaitForAllocs(
		f.T(), tc.Nomad(), "consultemplate/input/template_paths.nomad", jobID, "")
	f.Len(allocStubs, 1)
	allocID := allocStubs[0].ID

	e2eutil.WaitForAllocRunning(f.T(), tc.Nomad(), allocID)

	f.NoError(waitForTemplateRender(allocID, "task/secrets/foo/dst",
		func(out string) bool {
			return len(out) > 0
		}, nil), "expected file to have contents")

	f.NoError(waitForTemplateRender(allocID, "alloc/shared.txt",
		func(out string) bool {
			return len(out) > 0
		}, nil), "expected shared-alloc-dir file to have contents")
}

// TestTemplatePathInterpolation_Bad asserts that template.source paths are not
// allowed to escape the sandbox directory tree by default.
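//
// A sketch of the kind of escaping source this exercises (the real jobspec is
// input/bad_template_paths.nomad, so the exact path is an assumption):
//
//	template {
//	  source      = "../../../../../../../etc/passwd"
//	  destination = "local/dst"
//	}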
func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_Bad(f *framework.F) {
	wc := &e2eutil.WaitConfig{}
	interval, retries := wc.OrDefault()

	jobID := "bad-template-paths-" + uuid.Generate()[:8]
	tc.jobIDs = append(tc.jobIDs, jobID)

	allocStubs := e2eutil.RegisterAndWaitForAllocs(
		f.T(), tc.Nomad(), "consultemplate/input/bad_template_paths.nomad", jobID, "")
	f.Len(allocStubs, 1)
	allocID := allocStubs[0].ID

	// Wait for the alloc to fail
	var err error
	var alloc *api.Allocation
	testutil.WaitForResultRetries(retries, func() (bool, error) {
		time.Sleep(interval)
		alloc, _, err = tc.Nomad().Allocations().Info(allocID, nil)
		if err != nil {
			return false, err
		}

		return alloc.ClientStatus == structs.AllocClientStatusFailed,
			fmt.Errorf("expected status failed, but was: %s", alloc.ClientStatus)
	}, func(err error) {
		f.NoError(err, "failed to wait on alloc")
	})

	// Assert the "source escapes" error occurred to prevent false
	// positives.
	found := false
	for _, event := range alloc.TaskStates["task"].Events {
		if strings.Contains(event.DisplayMessage, "template source path escapes alloc directory") {
			found = true
			break
		}
	}
	f.True(found, "alloc failed but NOT due to expected source path escape error")
}

// TestTemplatePathInterpolation_SharedAllocDir asserts that NOMAD_ALLOC_DIR
// is supported as a destination for artifact and template blocks, and that
// it is properly interpolated for task drivers with varying filesystem
// isolation.
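//
// A sketch of the template assumed to drive both the shared-dir render and
// the env-var check below (the real jobspec is
// input/template_shared_alloc.nomad, so the exact fields are assumptions):
//
//	template {
//	  data        = "..."
//	  destination = "${NOMAD_ALLOC_DIR}/raw_exec.env"
//	  env         = true
//	}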
func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_SharedAllocDir(f *framework.F) {
	jobID := "template-shared-alloc-" + uuid.Generate()[:8]
	tc.jobIDs = append(tc.jobIDs, jobID)

	allocStubs := e2eutil.RegisterAndWaitForAllocs(
		f.T(), tc.Nomad(), "consultemplate/input/template_shared_alloc.nomad", jobID, "")
	f.Len(allocStubs, 1)
	allocID := allocStubs[0].ID

	e2eutil.WaitForAllocRunning(f.T(), tc.Nomad(), allocID)

	for _, task := range []string{"docker", "exec", "raw_exec"} {

		// Test that we can render templates into the shared alloc directory
		f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/raw_exec.env",
			func(out string) bool {
				return len(out) > 0 && strings.TrimSpace(out) != "/alloc"
			}, nil), "expected raw_exec.env to not be '/alloc'")

		f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/exec.env",
			func(out string) bool {
				return strings.TrimSpace(out) == "/alloc"
			}, nil), "expected shared exec.env to contain '/alloc'")

		f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/docker.env",
			func(out string) bool {
				return strings.TrimSpace(out) == "/alloc"
			}, nil), "expected shared docker.env to contain '/alloc'")

		// Test that we can fetch artifacts into the shared alloc directory
		for _, a := range []string{"google1.html", "google2.html", "google3.html"} {
			f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/"+a,
				func(out string) bool {
					return len(out) > 0
				}, nil), "expected artifact in alloc dir")
		}

		// Test that we can load environment variables rendered with templates
		// using interpolated paths
		out, err := e2eutil.Command("nomad", "alloc", "exec", "-task", task, allocID, "sh", "-c", "env")
		f.NoError(err)
		f.Contains(out, "HELLO_FROM=raw_exec")
	}
}

// TestConsulTemplate_NomadServiceLookups tests consul-template's Nomad service
// lookup functionality. It runs a job which registers two services, then
// another which performs both a list and a read template function lookup
// against the registered services.
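//
// The lookup job's templates are assumed to use consul-template's Nomad
// service functions roughly as follows (the real template lives in
// input/nomad_provider_service_lookup.nomad; the field names here are
// inferred from the rendered output asserted below):
//
//	{{ range nomadServices }}service {{ .Name }} {{ .Tags }}
//	{{ end }}
//	{{ range nomadService "default-nomad-provider-service-primary" }}service {{ .Name }} {{ .Tags }} {{ .Datacenter }} {{ .AllocID }}
//	{{ end }}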
func (tc *ConsulTemplateTest) TestConsulTemplate_NomadServiceLookups(f *framework.F) {

	// Set up our base job that will be used in various manners.
	serviceJob, err := jobspec.ParseFile("consultemplate/input/nomad_provider_service.nomad")
	f.NoError(err)
	serviceJobID := "test-consul-template-nomad-lookups" + uuid.Generate()[0:8]
	serviceJob.ID = &serviceJobID

	_, _, err = tc.Nomad().Jobs().Register(serviceJob, nil)
	f.NoError(err)
	tc.jobIDs = append(tc.jobIDs, serviceJobID)
	f.NoError(e2eutil.WaitForAllocStatusExpected(serviceJobID, "default", []string{"running"}), "job should be running")

	// Pull the allocation ID for the job; we use it to ensure this ID is
	// found in the rendered template later on.
	serviceJobAllocs, err := e2eutil.AllocsForJob(serviceJobID, "default")
	f.NoError(err)
	f.Len(serviceJobAllocs, 1)
	serviceAllocID := serviceJobAllocs[0]["ID"]
	// Create a non-default namespace.
	_, err = e2eutil.Command("nomad", "namespace", "apply", "platform")
	f.NoError(err)
	tc.namespaceIDs = append(tc.namespaceIDs, "platform")

	// Register a job which includes services destined for the Nomad provider
	// into the platform namespace. This is used to ensure consul-template
	// lookups stay bound to the allocation namespace.
	diffNamespaceServiceJobID := "test-consul-template-nomad-lookups" + uuid.Generate()[0:8]
	f.NoError(e2eutil.Register(diffNamespaceServiceJobID, "consultemplate/input/nomad_provider_service_ns.nomad"))
	tc.namespacedJobIDs["platform"] = append(tc.namespacedJobIDs["platform"], diffNamespaceServiceJobID)
	f.NoError(e2eutil.WaitForAllocStatusExpected(diffNamespaceServiceJobID, "platform", []string{"running"}), "job should be running")

	// Register a job which includes consul-template functions performing Nomad
	// service listing and reads.
	serviceLookupJobID := "test-consul-template-nomad-lookups" + uuid.Generate()[0:8]
	f.NoError(e2eutil.Register(serviceLookupJobID, "consultemplate/input/nomad_provider_service_lookup.nomad"))
	tc.jobIDs = append(tc.jobIDs, serviceLookupJobID)
	f.NoError(e2eutil.WaitForAllocStatusExpected(serviceLookupJobID, "default", []string{"running"}), "job should be running")

	// Find the allocation ID for the job which contains templates, so we can
	// perform filesystem actions.
	serviceLookupJobAllocs, err := e2eutil.AllocsForJob(serviceLookupJobID, "default")
	f.NoError(err)
	f.Len(serviceLookupJobAllocs, 1)
	serviceLookupAllocID := serviceLookupJobAllocs[0]["ID"]

	// Ensure the listing (nomadServices) template function has found all
	// services within the default namespace.
	err = waitForTaskFile(serviceLookupAllocID, "test", "${NOMAD_TASK_DIR}/services.conf",
		func(out string) bool {
			if !strings.Contains(out, "service default-nomad-provider-service-primary [bar foo]") {
				return false
			}
			if !strings.Contains(out, "service default-nomad-provider-service-secondary [baz buz]") {
				return false
			}
			return !strings.Contains(out, "service platform-nomad-provider-service-secondary [baz buz]")
		}, nil)
	f.NoError(err)

	// Ensure the direct service lookup has found the entry we expect.
	err = waitForTaskFile(serviceLookupAllocID, "test", "${NOMAD_TASK_DIR}/service.conf",
		func(out string) bool {
			expected := fmt.Sprintf("service default-nomad-provider-service-primary [bar foo] dc1 %s", serviceAllocID)
			return strings.Contains(out, expected)
		}, nil)
	f.NoError(err)

	// Scale the default namespaced service job in order to change the expected
	// number of entries.
	count := 3
	serviceJob.TaskGroups[0].Count = &count
	_, _, err = tc.Nomad().Jobs().Register(serviceJob, nil)
	f.NoError(err)

	// Pull the allocation IDs for the job; we use them to ensure they are
	// found in the rendered template later on. Wrap this in an Eventually
	// block due to the eventual consistency of the service registration
	// process.
	f.Eventually(func() bool {
		serviceJobAllocs, err = e2eutil.AllocsForJob(serviceJobID, "default")
		if err != nil {
			return false
		}
		return len(serviceJobAllocs) == 3
	}, 10*time.Second, 200*time.Millisecond, "unexpected number of allocs found")

	// Track the expected entries, including the alloc ID, to make this test
	// actually valuable.
	var expectedEntries []string
	for _, alloc := range serviceJobAllocs {
		e := fmt.Sprintf("service default-nomad-provider-service-primary [bar foo] dc1 %s", alloc["ID"])
		expectedEntries = append(expectedEntries, e)
	}

	// Ensure the direct service lookup has the new entries we expect.
	err = waitForTaskFile(serviceLookupAllocID, "test", "${NOMAD_TASK_DIR}/service.conf",
		func(out string) bool {
			for _, entry := range expectedEntries {
				if !strings.Contains(out, entry) {
					return false
				}
			}
			return true
		}, nil)
	f.NoError(err)
}

func waitForTaskFile(allocID, task, path string, test func(out string) bool, wc *e2eutil.WaitConfig) error {
	var err error
	var out string
	interval, retries := wc.OrDefault()

	testutil.WaitForResultRetries(retries, func() (bool, error) {
		time.Sleep(interval)
		out, err = e2eutil.Command("nomad", "alloc", "exec", "-task", task, allocID, "sh", "-c", "cat "+path)
		if err != nil {
			return false, fmt.Errorf("could not cat file %q from task %q in allocation %q: %v",
				path, task, allocID, err)
		}
		return test(out), nil
	}, func(e error) {
		err = fmt.Errorf("test for file content failed: got %#v\nerror: %v", out, e)
	})
	return err
}

// waitForTemplateRender is a helper that grabs a file via `nomad alloc fs`
// and runs the passed test function against its contents until it passes or
// we run out of retries.
func waitForTemplateRender(allocID, path string, test func(string) bool, wc *e2eutil.WaitConfig) error {
	var err error
	var out string
	interval, retries := wc.OrDefault()

	testutil.WaitForResultRetries(retries, func() (bool, error) {
		time.Sleep(interval)
		out, err = e2eutil.Command("nomad", "alloc", "fs", allocID, path)
		if err != nil {
			return false, fmt.Errorf("could not get file %q from allocation %q: %v",
				path, allocID, err)
		}
		return test(out), nil
	}, func(e error) {
		err = fmt.Errorf("test for file content failed: got %#v\nerror: %v", out, e)
	})
	return err
}


// waitForAllocStatusByGroup is similar to e2eutil.WaitForAllocStatusExpected
// but maps specific task group names to expected statuses without having to
// deal with specific counts.
func waitForAllocStatusByGroup(jobID, ns string, expected map[string]string, wc *e2eutil.WaitConfig) error {
	var got []map[string]string
	var err error
	interval, retries := wc.OrDefault()
	testutil.WaitForResultRetries(retries, func() (bool, error) {
		time.Sleep(interval)
		got, err = e2eutil.AllocsForJob(jobID, ns)
		if err != nil {
			return false, err
		}
		for _, row := range got {
			group := row["Task Group"]
			expectedStatus := expected[group]
			gotStatus := row["Status"]
			if expectedStatus != gotStatus {
				return false, fmt.Errorf("expected %q to be %q, got %q",
					group, expectedStatus, gotStatus)
			}
		}
		err = nil
		return true, nil
	}, func(e error) {
		err = fmt.Errorf("alloc status check failed: got %#v\nerror: %v", got, e)
	})
	return err
}