github.com/hernad/nomad@v1.6.112/e2e/csi/efs.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package csi

import (
	"fmt"
	"os"

	e2e "github.com/hernad/nomad/e2e/e2eutil"
	"github.com/hernad/nomad/e2e/framework"
	"github.com/hernad/nomad/helper/uuid"
	"github.com/stretchr/testify/require"
)

// CSINodeOnlyPluginEFSTest exercises the AWS EFS plugin, which is an
// example of a plugin that can run in Node-only mode.
type CSINodeOnlyPluginEFSTest struct {
	framework.TC
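	// uuid is a per-run suffix used to build unique job IDs; the ID slices
	// track jobs and volumes created during the test so that AfterEach can
	// clean them up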
	uuid         string
	testJobIDs   []string
	volumeIDs    []string
	pluginJobIDs []string
}

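// efsPluginID is the CSI plugin ID used when querying plugin health below;
// it is expected to match the plugin ID registered by the EFS node plugin job.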
const efsPluginID = "aws-efs0"

func (tc *CSINodeOnlyPluginEFSTest) BeforeAll(f *framework.F) {
	t := f.T()

	_, err := os.Stat("csi/input/volume-efs.hcl")
	if err != nil {
		t.Skip("skipping CSI test because EFS volume spec file missing:", err)
	}

	// Ensure the cluster has a leader and at least two client nodes
	// in a ready state before running tests
	e2e.WaitForLeader(t, tc.Nomad())
	e2e.WaitForNodesReady(t, tc.Nomad(), 2)
}

// TestEFSVolumeClaim launches the AWS EFS plugins and registers an EFS volume
// as a Nomad CSI volume. We then deploy a job that writes to the volume and
// share the volume with another job, which should be able to read the data
// written by the first job.
func (tc *CSINodeOnlyPluginEFSTest) TestEFSVolumeClaim(f *framework.F) {
	t := f.T()
	require := require.New(t)
	nomadClient := tc.Nomad()
	tc.uuid = uuid.Generate()[0:8]

	// deploy the node plugins job (no need for a controller for EFS)
	nodesJobID := "aws-efs-plugin-nodes-" + tc.uuid
	f.NoError(e2e.Register(nodesJobID, "csi/input/plugin-aws-efs-nodes.nomad"))
	tc.pluginJobIDs = append(tc.pluginJobIDs, nodesJobID)

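	// ns and the pluginAllocWait/pluginWait/reapWait timeouts used below are
	// package-level values shared by the CSI tests and defined elsewhere in
	// this package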
	f.NoError(e2e.WaitForAllocStatusComparison(
		func() ([]string, error) { return e2e.AllocStatuses(nodesJobID, ns) },
		func(got []string) bool {
			for _, status := range got {
				if status != "running" {
					return false
				}
			}
			return true
		}, pluginAllocWait,
	), "plugin job should be running")

	f.NoError(waitForPluginStatusMinNodeCount(efsPluginID, 2, pluginWait),
		"aws-efs0 node plugins did not become healthy")

	// register a volume
	volID := "efs-vol0"
	err := volumeRegister(volID, "csi/input/volume-efs.hcl", "register")
	require.NoError(err)
	tc.volumeIDs = append(tc.volumeIDs, volID)

	// deploy a job that writes to the volume
	writeJobID := "write-efs-" + tc.uuid
	tc.testJobIDs = append(tc.testJobIDs, writeJobID) // ensure failed tests clean up
	f.NoError(e2e.Register(writeJobID, "csi/input/use-efs-volume-write.nomad"))
	f.NoError(
		e2e.WaitForAllocStatusExpected(writeJobID, ns, []string{"running"}),
		"job should be running")

	allocs, err := e2e.AllocsForJob(writeJobID, ns)
	f.NoError(err, "could not get allocs for write job")
	f.Len(allocs, 1, "expected 1 alloc for write job")
	writeAllocID := allocs[0]["ID"]

	// read data from the volume and assert the writer wrote a file to it
	expectedPath := "/task/test/" + writeAllocID
	_, err = readFile(nomadClient, writeAllocID, expectedPath)
	require.NoError(err)

	// Shut down the writer so we can run a reader: although EFS should
	// support multiple readers, the plugin does not.
	err = e2e.StopJob(writeJobID)
	require.NoError(err)

	// wait for the volume unpublish workflow to complete
	require.NoError(waitForVolumeClaimRelease(volID, reapWait),
		"write-efs alloc claim was not released")

	// deploy a job that reads from the volume
	readJobID := "read-efs-" + tc.uuid
	tc.testJobIDs = append(tc.testJobIDs, readJobID) // ensure failed tests clean up
	f.NoError(e2e.Register(readJobID, "csi/input/use-efs-volume-read.nomad"))
	f.NoError(
		e2e.WaitForAllocStatusExpected(readJobID, ns, []string{"running"}),
		"job should be running")

	allocs, err = e2e.AllocsForJob(readJobID, ns)
	f.NoError(err, "could not get allocs for read job")
	f.Len(allocs, 1, "expected 1 alloc for read job")
	readAllocID := allocs[0]["ID"]

	// read data from the volume and assert the reader can see the file
	// written by the writer
	_, err = readFile(nomadClient, readAllocID, expectedPath)
	require.NoError(err)
}

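// AfterEach stops and purges any jobs started by the test, deregisters any
// volumes it registered, and runs a system garbage collection.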
func (tc *CSINodeOnlyPluginEFSTest) AfterEach(f *framework.F) {

	// Stop all jobs in test
	for _, id := range tc.testJobIDs {
		err := e2e.StopJob(id, "-purge")
		f.Assert().NoError(err)
	}
	tc.testJobIDs = []string{}

	// Deregister all volumes in test
	for _, id := range tc.volumeIDs {
		// make sure all the test jobs have finished unpublishing claims
		err := waitForVolumeClaimRelease(id, reapWait)
		f.Assert().NoError(err, "volume claims were not released")

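		// assertNoErrorElseDump is a helper defined elsewhere in this package;
		// it is passed the plugin job IDs, presumably so it can dump diagnostic
		// output for those jobs if the deregister fails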
		out, err := e2e.Command("nomad", "volume", "deregister", id)
		assertNoErrorElseDump(f, err,
			fmt.Sprintf("could not deregister volume:\n%v", out), tc.pluginJobIDs)
	}
	tc.volumeIDs = []string{}

	// Stop and purge all plugin jobs in test
	for _, id := range tc.pluginJobIDs {
		err := e2e.StopJob(id, "-purge")
		f.Assert().NoError(err)
	}
	tc.pluginJobIDs = []string{}

	// Garbage collect
	out, err := e2e.Command("nomad", "system", "gc")
	f.Assert().NoError(err, out)
}