github.com/hernad/nomad@v1.6.112/e2e/volumes/volumes_test.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package volumes

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/hernad/nomad/api"
	"github.com/hernad/nomad/e2e/e2eutil"
	"github.com/hernad/nomad/helper/uuid"
	"github.com/hernad/nomad/jobspec"
	"github.com/hernad/nomad/testutil"
)

const ns = ""

// TestVolumeMounts exercises host volume and Docker volume functionality for
// the exec and docker task drivers, particularly around mounting locations
// within the container and how this is exposed to the user.
func TestVolumeMounts(t *testing.T) {

	nomad := e2eutil.NomadClient(t)
	e2eutil.WaitForLeader(t, nomad)
	e2eutil.WaitForNodesReady(t, nomad, 1)

	jobIDs := []string{}
	t.Cleanup(e2eutil.CleanupJobsAndGC(t, &jobIDs))

	jobID := "test-volume-mounts-" + uuid.Short()
	require.NoError(t, e2eutil.Register(jobID, "./input/volumes.nomad"))
	jobIDs = append(jobIDs, jobID)

	expected := []string{"running"}
	require.NoError(t, e2eutil.WaitForAllocStatusExpected(jobID, ns, expected),
		"job should be running")

	allocs, err := e2eutil.AllocsForJob(jobID, ns)
	require.NoError(t, err, "could not get allocs for job")
	allocID := allocs[0]["ID"]
	nodeID := allocs[0]["Node ID"]

	// each task is expected to have written its own alloc ID into the volume
	// mounted at /tmp/foo
	cmdToExec := fmt.Sprintf("cat /tmp/foo/%s", allocID)

	out, err := e2eutil.AllocExec(allocID, "docker_task", cmdToExec, ns, nil)
	require.NoError(t, err, "could not exec into task: docker_task")
	require.Equal(t, allocID+"\n", out, "alloc data is missing from docker_task")

	out, err = e2eutil.AllocExec(allocID, "exec_task", cmdToExec, ns, nil)
	require.NoError(t, err, "could not exec into task: exec_task")
	require.Equal(t, allocID+"\n", out, "alloc data is missing from exec_task")

	err = e2eutil.StopJob(jobID)
	require.NoError(t, err, "could not stop job")

	// modify the job to make sure it's placed back on the same host; we want
	// to be able to verify that the data from the previous alloc is still there
	job, err := jobspec.ParseFile("./input/volumes.nomad")
	require.NoError(t, err)
	job.ID = &jobID
	job.Constraints = []*api.Constraint{
		{
			LTarget: "${node.unique.id}",
			RTarget: nodeID,
			Operand: "=",
		},
	}
	_, _, err = nomad.Jobs().Register(job, nil)
	require.NoError(t, err, "could not register updated job")

	// wait for the replacement allocation to show up alongside the old one
	testutil.WaitForResultRetries(5000, func() (bool, error) {
		time.Sleep(time.Millisecond * 100)
		allocs, err = e2eutil.AllocsForJob(jobID, ns)
		if err != nil {
			return false, err
		}
		if len(allocs) < 2 {
			return false, fmt.Errorf("no new allocation for %v: %v", jobID, allocs)
		}

		return true, nil
	}, func(e error) {
		require.NoError(t, e, "failed to get new alloc")
	})

	newAllocID := allocs[0]["ID"]

	newCmdToExec := fmt.Sprintf("cat /tmp/foo/%s", newAllocID)

	// the replacement alloc should see both the file written by the previous
	// alloc and its own file, since the volume contents persist on the node
	out, err = e2eutil.AllocExec(newAllocID, "docker_task", cmdToExec, ns, nil)
	require.NoError(t, err, "could not exec into task: docker_task")
	require.Equal(t, allocID+"\n", out, "previous alloc data is missing from docker_task")

	out, err = e2eutil.AllocExec(newAllocID, "docker_task", newCmdToExec, ns, nil)
	require.NoError(t, err, "could not exec into task: docker_task")
	require.Equal(t, newAllocID+"\n", out, "new alloc data is missing from docker_task")

	out, err = e2eutil.AllocExec(newAllocID, "exec_task", cmdToExec, ns, nil)
	require.NoError(t, err, "could not exec into task: exec_task")
	require.Equal(t, allocID+"\n", out, "previous alloc data is missing from exec_task")

	out, err = e2eutil.AllocExec(newAllocID, "exec_task", newCmdToExec, ns, nil)
	require.NoError(t, err, "could not exec into task: exec_task")
	require.Equal(t, newAllocID+"\n", out, "new alloc data is missing from exec_task")
}
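
// The test above only implies the shape of ./input/volumes.nomad (both tasks
// read files under /tmp/foo and expect them to survive a reschedule onto the
// same node). What follows is a minimal, hypothetical sketch of that shape,
// expressed with the api package rather than HCL: one group requesting a host
// volume and mounting it at /tmp/foo in a docker task and an exec task. The
// job, group, and volume names here are illustrative assumptions, not the
// contents of the real jobspec.
func exampleVolumesJobSketch() *api.Job {
	job := api.NewServiceJob("volumes-sketch", "volumes-sketch", "global", 50)
	group := api.NewTaskGroup("group", 1)

	// hypothetical host volume; the real jobspec may use different names or a
	// Docker volume instead
	group.Volumes = map[string]*api.VolumeRequest{
		"data": {Name: "data", Type: "host", Source: "shared_data"},
	}

	for _, driver := range []string{"docker", "exec"} {
		task := api.NewTask(driver+"_task", driver)
		// mount the volume where the test expects to find the per-alloc files
		task.VolumeMounts = []*api.VolumeMount{{
			Volume:      pointerOf("data"),
			Destination: pointerOf("/tmp/foo"),
		}}
		// the real tasks would also need driver config (image, command) that
		// writes the alloc ID to /tmp/foo/<alloc ID>; omitted here
		group.AddTask(task)
	}

	job.AddTaskGroup(group)
	return job
}

// pointerOf mirrors the pointer fields used by api.VolumeMount.
func pointerOf[T any](v T) *T { return &v }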