github.com/uchennaokeke444/nomad@v0.11.8/e2e/hostvolumes/host_volumes.go

package hostvolumes

import (
	"time"

	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)

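// BasicHostVolumeTest exercises scheduling, running, and restarting an
// allocation whose task group requests a host volume.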
type BasicHostVolumeTest struct {
	framework.TC
	jobIds []string
}

func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "Host Volumes",
		CanRunLocal: true,
		Cases: []framework.TestCase{
			new(BasicHostVolumeTest),
		},
	})
}

func (tc *BasicHostVolumeTest) BeforeAll(f *framework.F) {
	// Ensure the cluster has a leader before running tests
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	// Ensure that we have at least one client node in the ready state
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)
}

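// TestSingleHostVolume schedules a job that mounts a single host volume,
// verifies it was placed on a node exposing that volume, and confirms the
// allocation keeps running across an on-demand restart.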
func (tc *BasicHostVolumeTest) TestSingleHostVolume(f *framework.F) {
	require := require.New(f.T())

	nomadClient := tc.Nomad()
	uuid := uuid.Generate()
	jobID := "hostvol" + uuid[0:8]
	tc.jobIds = append(tc.jobIds, jobID)
	allocs := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient, "hostvolumes/input/single_mount.nomad", jobID, "")

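	// waitForTaskState polls the job's single allocation until its "test"
	// task reaches the desired state, failing after 30 seconds.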
	waitForTaskState := func(desiredState string) {
		require.Eventually(func() bool {
			allocs, _, _ := nomadClient.Jobs().Allocations(jobID, false, nil)
			if len(allocs) != 1 {
				return false
			}
			first := allocs[0]
			taskState := first.TaskStates["test"]
			if taskState == nil {
				return false
			}
			return taskState.State == desiredState
		}, 30*time.Second, 1*time.Second)
	}

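	// waitForClientAllocStatus polls until the allocation's client status
	// matches the desired status.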
	waitForClientAllocStatus := func(desiredStatus string) {
		require.Eventually(func() bool {
			allocSummaries, _, _ := nomadClient.Jobs().Allocations(jobID, false, nil)
			if len(allocSummaries) != 1 {
				return false
			}

			alloc, _, _ := nomadClient.Allocations().Info(allocSummaries[0].ID, nil)
			if alloc == nil {
				return false
			}

			return alloc.ClientStatus == desiredStatus
		}, 30*time.Second, 1*time.Second)
	}

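	// waitForRestartCount polls until the "test" task reports the desired
	// number of restarts.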
	waitForRestartCount := func(desiredCount uint64) {
		require.Eventually(func() bool {
			allocs, _, _ := nomadClient.Jobs().Allocations(jobID, false, nil)
			if len(allocs) != 1 {
				return false
			}
			first := allocs[0]
			// Guard against the task state not being reported yet
			taskState := first.TaskStates["test"]
			if taskState == nil {
				return false
			}
			return taskState.Restarts == desiredCount
		}, 30*time.Second, 1*time.Second)
	}

	// Verify scheduling: each allocation must land on a node that exposes
	// the requested "shared_data" host volume
	for _, allocStub := range allocs {
		node, _, err := nomadClient.Nodes().Info(allocStub.NodeID, nil)
		require.Nil(err)

		_, ok := node.HostVolumes["shared_data"]
		require.True(ok, "Node does not have the requested volume")
	}

	// Wrap in a retry to wait until the task is running
	waitForTaskState(structs.TaskStateRunning)

	// The client status should be running
	waitForClientAllocStatus(structs.AllocClientStatusRunning)

	// The task should not have been restarted
	waitForRestartCount(0)

	// Ensure allocs can be restarted
	for _, allocStub := range allocs {
		alloc, _, err := nomadClient.Allocations().Info(allocStub.ID, nil)
		require.Nil(err)

		err = nomadClient.Allocations().Restart(alloc, "", nil)
		require.Nil(err)
	}

	// The task should have been restarted once
	waitForRestartCount(1)

	// Wrap in a retry to wait until the task is running again
	waitForTaskState(structs.TaskStateRunning)

	// The client status should be running again
	waitForClientAllocStatus(structs.AllocClientStatusRunning)
}

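// AfterEach stops any jobs registered by the test and triggers a garbage
// collection so later test cases start from a clean cluster.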
func (tc *BasicHostVolumeTest) AfterEach(f *framework.F) {
	nomadClient := tc.Nomad()
	jobs := nomadClient.Jobs()
	// Stop and purge all jobs started by the test
	for _, id := range tc.jobIds {
		jobs.Deregister(id, true, nil)
	}
	// Garbage collect
	nomadClient.System().GarbageCollect()
}