github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/e2e/consultemplate/consultemplate.go (about) 1 package consultemplate 2 3 import ( 4 "fmt" 5 "os" 6 "strings" 7 "time" 8 9 capi "github.com/hashicorp/consul/api" 10 "github.com/hashicorp/nomad/api" 11 "github.com/hashicorp/nomad/e2e/e2eutil" 12 e2e "github.com/hashicorp/nomad/e2e/e2eutil" 13 "github.com/hashicorp/nomad/e2e/framework" 14 "github.com/hashicorp/nomad/helper/uuid" 15 "github.com/hashicorp/nomad/jobspec" 16 "github.com/hashicorp/nomad/nomad/structs" 17 "github.com/hashicorp/nomad/testutil" 18 ) 19 20 const ns = "" 21 22 type ConsulTemplateTest struct { 23 framework.TC 24 jobIDs []string 25 consulKeys []string 26 } 27 28 func init() { 29 framework.AddSuites(&framework.TestSuite{ 30 Component: "ConsulTemplate", 31 CanRunLocal: true, 32 Consul: true, 33 Cases: []framework.TestCase{ 34 new(ConsulTemplateTest), 35 }, 36 }) 37 } 38 39 func (tc *ConsulTemplateTest) BeforeAll(f *framework.F) { 40 e2e.WaitForLeader(f.T(), tc.Nomad()) 41 e2e.WaitForNodesReady(f.T(), tc.Nomad(), 1) 42 } 43 44 func (tc *ConsulTemplateTest) AfterEach(f *framework.F) { 45 if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" { 46 return 47 } 48 49 for _, id := range tc.jobIDs { 50 _, err := e2e.Command("nomad", "job", "stop", "-purge", id) 51 f.Assert().NoError(err, "could not clean up job", id) 52 } 53 tc.jobIDs = []string{} 54 55 for _, key := range tc.consulKeys { 56 _, err := tc.Consul().KV().Delete(key, nil) 57 f.Assert().NoError(err, "could not clean up consul key", key) 58 } 59 tc.consulKeys = []string{} 60 61 _, err := e2e.Command("nomad", "system", "gc") 62 f.NoError(err) 63 } 64 65 // TestTemplateUpdateTriggers exercises consul-template integration, verifying that: 66 // - missing keys block allocations from starting 67 // - key updates trigger re-render 68 // - service updates trigger re-render 69 // - 'noop' vs ''restart' configuration 70 func (tc *ConsulTemplateTest) TestTemplateUpdateTriggers(f *framework.F) { 71 72 wc := &e2e.WaitConfig{} 73 interval, retries := wc.OrDefault() 74 75 key := "consultemplate-" + uuid.Generate()[:8] 76 jobID := key 77 78 replacement := fmt.Sprintf(`--- 79 key: {{ key "%s" }} 80 job: {{ env "NOMAD_JOB_NAME" }} 81 `, key) 82 83 // Ensure consul key does not exist 84 _, err := tc.Consul().KV().Delete(key, nil) 85 f.NoError(err) 86 87 // Parse job so we can replace the template stanza with isolated keys 88 job, err := jobspec.ParseFile("consultemplate/input/templating.nomad") 89 f.NoError(err) 90 job.ID = &jobID 91 92 job.TaskGroups[0].Tasks[0].Templates[1].EmbeddedTmpl = &replacement 93 job.TaskGroups[1].Tasks[0].Templates[1].EmbeddedTmpl = &replacement 94 95 tc.jobIDs = append(tc.jobIDs, jobID) 96 97 _, _, err = tc.Nomad().Jobs().Register(job, nil) 98 f.NoError(err, "could not register job") 99 100 expected := map[string]string{ 101 "upstream": "running", 102 "exec_downstream": "pending", 103 "docker_downstream": "pending"} 104 f.NoError(waitForAllocStatusByGroup(jobID, ns, expected, nil)) 105 106 // We won't reschedule any of these allocs, so we can cache these IDs for later 107 downstreams := map[string]string{} // alloc ID -> group name 108 allocs, err := e2e.AllocsForJob(jobID, ns) 109 f.NoError(err) 110 for _, alloc := range allocs { 111 group := alloc["Task Group"] 112 if group == "docker_downstream" || group == "exec_downstream" { 113 downstreams[alloc["ID"]] = group 114 } 115 } 116 117 // note: checking pending above doesn't tell us whether we've tried to render 118 // the template yet, so we still need to poll for the template event 119 for allocID, group := range downstreams { 120 var checkErr error 121 testutil.WaitForResultRetries(retries, func() (bool, error) { 122 time.Sleep(interval) 123 out, err := e2e.Command("nomad", "alloc", "status", allocID) 124 f.NoError(err, "could not get allocation status") 125 return strings.Contains(out, "Missing: kv.block"), 126 fmt.Errorf("expected %q to be blocked on Consul key", group) 127 }, func(e error) { 128 checkErr = e 129 }) 130 f.NoError(checkErr) 131 } 132 133 // Write our key to Consul 134 _, err = tc.Consul().KV().Put(&capi.KVPair{Key: key, Value: []byte("foo")}, nil) 135 f.NoError(err) 136 tc.consulKeys = append(tc.consulKeys, key) 137 138 // template will render, allowing downstream allocs to run 139 expected = map[string]string{ 140 "upstream": "running", 141 "exec_downstream": "running", 142 "docker_downstream": "running"} 143 f.NoError(waitForAllocStatusByGroup(jobID, ns, expected, nil)) 144 145 // verify we've rendered the templates 146 for allocID := range downstreams { 147 f.NoError(waitForTemplateRender(allocID, "task/local/kv.yml", 148 func(out string) bool { 149 return strings.TrimSpace(out) == "---\nkey: foo\njob: templating" 150 }, nil), "expected consul key to be rendered") 151 152 f.NoError(waitForTemplateRender(allocID, "task/local/services.conf", 153 func(out string) bool { 154 confLines := strings.Split(strings.TrimSpace(out), "\n") 155 servers := 0 156 for _, line := range confLines { 157 if strings.HasPrefix(line, "server upstream-service ") { 158 servers++ 159 } 160 } 161 return servers == 2 162 }, nil), "expected 2 upstream servers") 163 } 164 165 // Update our key in Consul 166 _, err = tc.Consul().KV().Put(&capi.KVPair{Key: key, Value: []byte("bar")}, nil) 167 f.NoError(err) 168 169 // Wait for restart 170 for allocID, group := range downstreams { 171 var checkErr error 172 testutil.WaitForResultRetries(retries, func() (bool, error) { 173 time.Sleep(interval) 174 out, err := e2e.Command("nomad", "alloc", "status", allocID) 175 f.NoError(err, "could not get allocation status") 176 177 section, err := e2e.GetSection(out, "Task Events:") 178 f.NoError(err, out) 179 180 restarts, err := e2e.GetField(section, "Total Restarts") 181 f.NoError(err) 182 return restarts == "1", 183 fmt.Errorf("expected 1 restart for %q but found %s", group, restarts) 184 }, func(e error) { 185 checkErr = e 186 }) 187 f.NoError(checkErr) 188 189 // verify we've re-rendered the template 190 f.NoError(waitForTemplateRender(allocID, "task/local/kv.yml", 191 func(out string) bool { 192 return strings.TrimSpace(out) == "---\nkey: bar\njob: templating" 193 }, nil), "expected updated consul key") 194 } 195 196 // increase the count for upstreams 197 count := 3 198 job.TaskGroups[2].Count = &count 199 _, _, err = tc.Nomad().Jobs().Register(job, nil) 200 f.NoError(err, "could not register job") 201 202 // wait for re-rendering 203 for allocID := range downstreams { 204 f.NoError(waitForTemplateRender(allocID, "task/local/services.conf", 205 func(out string) bool { 206 confLines := strings.Split(strings.TrimSpace(out), "\n") 207 servers := 0 208 for _, line := range confLines { 209 if strings.HasPrefix(line, "server upstream-service ") { 210 servers++ 211 } 212 } 213 return servers == 3 214 }, nil), "expected 3 upstream servers") 215 216 // verify noop was honored: no additional restarts 217 out, err := e2e.Command("nomad", "alloc", "status", allocID) 218 f.NoError(err, "could not get allocation status") 219 220 section, err := e2e.GetSection(out, "Task Events:") 221 f.NoError(err, out) 222 223 restarts, err := e2e.GetField(section, "Total Restarts") 224 f.NoError(err) 225 f.Equal("1", restarts, "expected no new restarts for group") 226 } 227 } 228 229 // TestTemplatePathInterpolation_Ok asserts that NOMAD_*_DIR variables are 230 // properly interpolated into template source and destination paths without 231 // being treated as escaping. 232 func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_Ok(f *framework.F) { 233 jobID := "template-paths-" + uuid.Generate()[:8] 234 tc.jobIDs = append(tc.jobIDs, jobID) 235 236 allocStubs := e2eutil.RegisterAndWaitForAllocs( 237 f.T(), tc.Nomad(), "consultemplate/input/template_paths.nomad", jobID, "") 238 f.Len(allocStubs, 1) 239 allocID := allocStubs[0].ID 240 241 e2eutil.WaitForAllocRunning(f.T(), tc.Nomad(), allocID) 242 243 f.NoError(waitForTemplateRender(allocID, "task/secrets/foo/dst", 244 func(out string) bool { 245 return len(out) > 0 246 }, nil), "expected file to have contents") 247 248 f.NoError(waitForTemplateRender(allocID, "alloc/shared.txt", 249 func(out string) bool { 250 return len(out) > 0 251 }, nil), "expected shared-alloc-dir file to have contents") 252 } 253 254 // TestTemplatePathInterpolation_Bad asserts that template.source paths are not 255 // allowed to escape the sandbox directory tree by default. 256 func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_Bad(f *framework.F) { 257 wc := &e2e.WaitConfig{} 258 interval, retries := wc.OrDefault() 259 260 jobID := "bad-template-paths-" + uuid.Generate()[:8] 261 tc.jobIDs = append(tc.jobIDs, jobID) 262 263 allocStubs := e2eutil.RegisterAndWaitForAllocs( 264 f.T(), tc.Nomad(), "consultemplate/input/bad_template_paths.nomad", jobID, "") 265 f.Len(allocStubs, 1) 266 allocID := allocStubs[0].ID 267 268 // Wait for alloc to fail 269 var err error 270 var alloc *api.Allocation 271 testutil.WaitForResultRetries(retries, func() (bool, error) { 272 time.Sleep(interval) 273 alloc, _, err = tc.Nomad().Allocations().Info(allocID, nil) 274 if err != nil { 275 return false, err 276 } 277 278 return alloc.ClientStatus == structs.AllocClientStatusFailed, fmt.Errorf("expected status failed, but was: %s", alloc.ClientStatus) 279 }, func(err error) { 280 f.NoError(err, "failed to wait on alloc") 281 }) 282 283 // Assert the "source escapes" error occurred to prevent false 284 // positives. 285 found := false 286 for _, event := range alloc.TaskStates["task"].Events { 287 if strings.Contains(event.DisplayMessage, "template source path escapes alloc directory") { 288 found = true 289 break 290 } 291 } 292 f.True(found, "alloc failed but NOT due to expected source path escape error") 293 } 294 295 // TestTemplatePathInterpolation_SharedAlloc asserts that NOMAD_ALLOC_DIR 296 // is supported as a destination for artifact and template blocks, and 297 // that it is properly interpolated for task drivers with varying 298 // filesystem isolation 299 func (tc *ConsulTemplateTest) TestTemplatePathInterpolation_SharedAllocDir(f *framework.F) { 300 jobID := "template-shared-alloc-" + uuid.Generate()[:8] 301 tc.jobIDs = append(tc.jobIDs, jobID) 302 303 allocStubs := e2eutil.RegisterAndWaitForAllocs( 304 f.T(), tc.Nomad(), "consultemplate/input/template_shared_alloc.nomad", jobID, "") 305 f.Len(allocStubs, 1) 306 allocID := allocStubs[0].ID 307 308 e2eutil.WaitForAllocRunning(f.T(), tc.Nomad(), allocID) 309 310 for _, task := range []string{"docker", "exec", "raw_exec"} { 311 312 // tests that we can render templates into the shared alloc directory 313 f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/raw_exec.env", 314 func(out string) bool { 315 return len(out) > 0 && strings.TrimSpace(out) != "/alloc" 316 }, nil), "expected raw_exec.env to not be '/alloc'") 317 318 f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/exec.env", 319 func(out string) bool { 320 return strings.TrimSpace(out) == "/alloc" 321 }, nil), "expected shared exec.env to contain '/alloc'") 322 323 f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/docker.env", 324 func(out string) bool { 325 return strings.TrimSpace(out) == "/alloc" 326 }, nil), "expected shared docker.env to contain '/alloc'") 327 328 // test that we can fetch artifacts into the shared alloc directory 329 for _, a := range []string{"google1.html", "google2.html", "google3.html"} { 330 f.NoError(waitForTaskFile(allocID, task, "${NOMAD_ALLOC_DIR}/"+a, 331 func(out string) bool { 332 return len(out) > 0 333 }, nil), "expected artifact in alloc dir") 334 } 335 336 // test that we can load environment variables rendered with templates using interpolated paths 337 out, err := e2e.Command("nomad", "alloc", "exec", "-task", task, allocID, "sh", "-c", "env") 338 f.NoError(err) 339 f.Contains(out, "HELLO_FROM=raw_exec") 340 } 341 } 342 343 func waitForTaskFile(allocID, task, path string, test func(out string) bool, wc *e2e.WaitConfig) error { 344 var err error 345 var out string 346 interval, retries := wc.OrDefault() 347 348 testutil.WaitForResultRetries(retries, func() (bool, error) { 349 time.Sleep(interval) 350 out, err = e2e.Command("nomad", "alloc", "exec", "-task", task, allocID, "sh", "-c", "cat "+path) 351 if err != nil { 352 return false, fmt.Errorf("could not cat file %q from task %q in allocation %q: %v", 353 path, task, allocID, err) 354 } 355 return test(out), nil 356 }, func(e error) { 357 err = fmt.Errorf("test for file content failed: got %#v\nerror: %v", out, e) 358 }) 359 return err 360 } 361 362 // waitForTemplateRender is a helper that grabs a file via alloc fs 363 // and tests it for 364 func waitForTemplateRender(allocID, path string, test func(string) bool, wc *e2e.WaitConfig) error { 365 var err error 366 var out string 367 interval, retries := wc.OrDefault() 368 369 testutil.WaitForResultRetries(retries, func() (bool, error) { 370 time.Sleep(interval) 371 out, err = e2e.Command("nomad", "alloc", "fs", allocID, path) 372 if err != nil { 373 return false, fmt.Errorf("could not get file %q from allocation %q: %v", 374 path, allocID, err) 375 } 376 return test(out), nil 377 }, func(e error) { 378 err = fmt.Errorf("test for file content failed: got %#v\nerror: %v", out, e) 379 }) 380 return err 381 } 382 383 // waitForAllocStatusByGroup is similar to WaitForAllocStatus but maps 384 // specific task group names to statuses without having to deal with specific counts 385 func waitForAllocStatusByGroup(jobID, ns string, expected map[string]string, wc *e2e.WaitConfig) error { 386 var got []map[string]string 387 var err error 388 interval, retries := wc.OrDefault() 389 testutil.WaitForResultRetries(retries, func() (bool, error) { 390 time.Sleep(interval) 391 got, err = e2e.AllocsForJob(jobID, ns) 392 if err != nil { 393 return false, err 394 } 395 for _, row := range got { 396 group := row["Task Group"] 397 expectedStatus := expected[group] 398 gotStatus := row["Status"] 399 if expectedStatus != gotStatus { 400 return false, fmt.Errorf("expected %q to be %q, got %q", 401 group, expectedStatus, gotStatus) 402 } 403 } 404 err = nil 405 return true, nil 406 }, func(e error) { 407 err = fmt.Errorf("alloc status check failed: got %#v\nerror: %v", got, e) 408 }) 409 return err 410 }