github.com/iqoqo/nomad@v0.11.3-0.20200911112621-d7021c74d101/e2e/csi/csi.go

package csi

import (
	"bytes"
	"context"
	"io/ioutil"
	"os"
	"time"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/stretchr/testify/require"
)

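// CSIVolumesTest exercises the CSI volume lifecycle end to end. The ID
// slices record the jobs and volumes each test case creates, so that
// AfterEach can tear them down even when an assertion fails midway.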
type CSIVolumesTest struct {
	framework.TC
	testJobIDs   []string
	volumeIDs    []string
	pluginJobIDs []string
}

func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "CSI",
		CanRunLocal: true,
		Consul:      false,
		Cases: []framework.TestCase{
			new(CSIVolumesTest),
		},
	})
}

func (tc *CSIVolumesTest) BeforeAll(f *framework.F) {
	t := f.T()
	// Ensure the cluster has a leader and at least two client
	// nodes in a ready state before running tests
	e2eutil.WaitForLeader(t, tc.Nomad())
	e2eutil.WaitForNodesReady(t, tc.Nomad(), 2)
}

// TestEBSVolumeClaim launches AWS EBS plugins and registers an EBS volume
// as a Nomad CSI volume. We then deploy a job that writes to the volume,
// stop that job, and reuse the volume for another job which should be able
// to read the data written by the first job.
func (tc *CSIVolumesTest) TestEBSVolumeClaim(f *framework.F) {
	t := f.T()
	require := require.New(t)
	nomadClient := tc.Nomad()
	uuid := uuid.Generate()

	// deploy the controller plugin job
	controllerJobID := "aws-ebs-plugin-controller-" + uuid[0:8]
	tc.pluginJobIDs = append(tc.pluginJobIDs, controllerJobID)
	e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
		"csi/input/plugin-aws-ebs-controller.nomad", controllerJobID, "")

	// deploy the node plugins job
	nodesJobID := "aws-ebs-plugin-nodes-" + uuid[0:8]
	tc.pluginJobIDs = append(tc.pluginJobIDs, nodesJobID)
	e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
		"csi/input/plugin-aws-ebs-nodes.nomad", nodesJobID, "")

	// wait for plugin to become healthy
	require.Eventuallyf(func() bool {
		plugin, _, err := nomadClient.CSIPlugins().Info("aws-ebs0", nil)
		if err != nil {
			return false
		}
		if plugin.ControllersHealthy != 1 || plugin.NodesHealthy < 2 {
			return false
		}
		return true
		// TODO(tgross): cut down this time after fixing
		// https://github.com/hashicorp/nomad/issues/7296
	}, 90*time.Second, 5*time.Second, "aws-ebs0 plugins did not become healthy")

	// register a volume
	volID := "ebs-vol0"
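	// volID must match the volume ID declared in volume-ebs.hcl:
	// Register takes its ID from the parsed file, while the claim
	// checks and cleanup below look the volume up by volID.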
	vol, err := parseVolumeFile("csi/input/volume-ebs.hcl")
	require.NoError(err)
	_, err = nomadClient.CSIVolumes().Register(vol, nil)
	require.NoError(err)
	tc.volumeIDs = append(tc.volumeIDs, volID)

	// deploy a job that writes to the volume
	writeJobID := "write-ebs-" + uuid[0:8]
	tc.testJobIDs = append(tc.testJobIDs, writeJobID) // ensure failed tests clean up
	writeAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
		"csi/input/use-ebs-volume.nomad", writeJobID, "")
	writeAllocID := writeAllocs[0].ID
	e2eutil.WaitForAllocRunning(t, nomadClient, writeAllocID)

	// read data from volume and assert the writer wrote a file to it
	writeAlloc, _, err := nomadClient.Allocations().Info(writeAllocID, nil)
	require.NoError(err)
	expectedPath := "/local/test/" + writeAllocID
	_, err = readFile(nomadClient, writeAlloc, expectedPath)
	require.NoError(err)

	// Shut down (and purge) the writer so we can run a reader. We could
	// mount the EBS volume with multi-attach, but we want this test to
	// exercise the unpublish workflow. This runs the equivalent of
	// 'nomad job stop -purge'.
	_, _, err = nomadClient.Jobs().Deregister(writeJobID, true, nil)
	require.NoError(err)
	// instead of waiting for the alloc to stop, wait for the volume claim gc run
	require.Eventuallyf(func() bool {
		vol, _, err := nomadClient.CSIVolumes().Info(volID, nil)
		if err != nil {
			return false
		}
		return len(vol.WriteAllocs) == 0
	}, 90*time.Second, 5*time.Second, "write-ebs alloc claim was not released")

	// deploy a job so we can read from the volume
	readJobID := "read-ebs-" + uuid[0:8]
	tc.testJobIDs = append(tc.testJobIDs, readJobID)
	readAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
		"csi/input/use-ebs-volume.nomad", readJobID, "")
	readAllocID := readAllocs[0].ID
	e2eutil.WaitForAllocRunning(t, nomadClient, readAllocID)

	// read data from the volume and assert the file the writer wrote is
	// still there
	readAlloc, _, err := nomadClient.Allocations().Info(readAllocID, nil)
	require.NoError(err)
	_, err = readFile(nomadClient, readAlloc, expectedPath)
	require.NoError(err)
}

// TestEFSVolumeClaim launches AWS EFS plugins and registers an EFS volume
// as a Nomad CSI volume. We then deploy a job that writes to the volume,
// then share the volume with another job which should be able to read the
// data written by the first job.
func (tc *CSIVolumesTest) TestEFSVolumeClaim(f *framework.F) {
	t := f.T()
	require := require.New(t)
	nomadClient := tc.Nomad()
	uuid := uuid.Generate()

	// deploy the node plugins job (no need for a controller for EFS)
	nodesJobID := "aws-efs-plugin-nodes-" + uuid[0:8]
	tc.pluginJobIDs = append(tc.pluginJobIDs, nodesJobID)
	e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
		"csi/input/plugin-aws-efs-nodes.nomad", nodesJobID, "")

	// wait for plugin to become healthy
	require.Eventuallyf(func() bool {
		plugin, _, err := nomadClient.CSIPlugins().Info("aws-efs0", nil)
		if err != nil {
			return false
		}
		if plugin.NodesHealthy < 2 {
			return false
		}
		return true
		// TODO(tgross): cut down this time after fixing
		// https://github.com/hashicorp/nomad/issues/7296
	}, 90*time.Second, 5*time.Second, "aws-efs0 plugins did not become healthy")

	// register a volume
	volID := "efs-vol0"
	vol, err := parseVolumeFile("csi/input/volume-efs.hcl")
	require.NoError(err)
	_, err = nomadClient.CSIVolumes().Register(vol, nil)
	require.NoError(err)
	tc.volumeIDs = append(tc.volumeIDs, volID)

	// deploy a job that writes to the volume
	writeJobID := "write-efs-" + uuid[0:8]
	tc.testJobIDs = append(tc.testJobIDs, writeJobID) // ensure failed tests clean up
	writeAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
		"csi/input/use-efs-volume-write.nomad", writeJobID, "")
	writeAllocID := writeAllocs[0].ID
	e2eutil.WaitForAllocRunning(t, nomadClient, writeAllocID)

	// read data from volume and assert the writer wrote a file to it
	writeAlloc, _, err := nomadClient.Allocations().Info(writeAllocID, nil)
	require.NoError(err)
	expectedPath := "/local/test/" + writeAllocID
	_, err = readFile(nomadClient, writeAlloc, expectedPath)
	require.NoError(err)

	// Shut down the writer so we can run a reader. Although EFS should
	// support multiple readers, the plugin does not. This runs the
	// equivalent of 'nomad job stop'.
	_, _, err = nomadClient.Jobs().Deregister(writeJobID, false, nil)
	require.NoError(err)
	// instead of waiting for the alloc to stop, wait for the volume claim gc run
	require.Eventuallyf(func() bool {
		vol, _, err := nomadClient.CSIVolumes().Info(volID, nil)
		if err != nil {
			return false
		}
		return len(vol.WriteAllocs) == 0
	}, 90*time.Second, 5*time.Second, "write-efs alloc claim was not released")

	// deploy a job that reads from the volume.
	readJobID := "read-efs-" + uuid[0:8]
	tc.testJobIDs = append(tc.testJobIDs, readJobID)
	readAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
		"csi/input/use-efs-volume-read.nomad", readJobID, "")
	e2eutil.WaitForAllocRunning(t, nomadClient, readAllocs[0].ID)

	// read data from the volume and assert the file the writer wrote is
	// still there
	readAlloc, _, err := nomadClient.Allocations().Info(readAllocs[0].ID, nil)
	require.NoError(err)
	_, err = readFile(nomadClient, readAlloc, expectedPath)
	require.NoError(err)
}

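// AfterEach purges the test jobs, deregisters the volumes, and then purges
// the plugin jobs created by the test case, in that order, before
// triggering a server-side garbage collection. Errors are ignored so that
// one failed cleanup doesn't block the rest.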
func (tc *CSIVolumesTest) AfterEach(f *framework.F) {
	nomadClient := tc.Nomad()
	jobs := nomadClient.Jobs()
	// Stop all jobs in test
	for _, id := range tc.testJobIDs {
		jobs.Deregister(id, true, nil)
	}
	// Deregister all volumes in test
	for _, id := range tc.volumeIDs {
		nomadClient.CSIVolumes().Deregister(id, nil)
	}
	// Deregister all plugin jobs in test
	for _, id := range tc.pluginJobIDs {
		jobs.Deregister(id, true, nil)
	}

	// Garbage collect
	nomadClient.System().GarbageCollect()
}

// TODO(tgross): replace this w/ AllocFS().Stat() after
// https://github.com/hashicorp/nomad/issues/7365 is fixed
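//
// readFile execs `cat` against the given path in the allocation's "task"
// task and returns whatever it wrote to stdout.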
func readFile(client *api.Client, alloc *api.Allocation, path string) (bytes.Buffer, error) {
	ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancelFn()

	var stdout, stderr bytes.Buffer
	_, err := client.Allocations().Exec(ctx,
		alloc, "task", false,
		[]string{"cat", path},
		os.Stdin, &stdout, &stderr,
		make(chan api.TerminalSize), nil)
	return stdout, err
}

// TODO(tgross): this is taken from `nomad volume register` but
// it would be nice if we could expose this with a ParseFile as
// we do for api.Job.
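//
// For illustration only, a volume file of the shape this parser accepts
// looks roughly like the following (values are hypothetical; the field
// names mirror api.CSIVolume plus the `type` key stripped below):
//
//	id              = "ebs-vol0"
//	name            = "ebs-vol0"
//	type            = "csi"
//	external_id     = "vol-0123456789abcdef0"
//	plugin_id       = "aws-ebs0"
//	access_mode     = "single-node-writer"
//	attachment_mode = "file-system"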
func parseVolumeFile(filepath string) (*api.CSIVolume, error) {
	rawInput, err := ioutil.ReadFile(filepath)
	if err != nil {
		return nil, err
	}
	ast, err := hcl.Parse(string(rawInput))
	if err != nil {
		return nil, err
	}

	output := &api.CSIVolume{}
	err = hcl.DecodeObject(output, ast)
	if err != nil {
		return nil, err
	}

	// api.CSIVolume doesn't have the type field; it's used only for
	// dispatch in parseVolumeType in the CLI code this was taken from
	helper.RemoveEqualFold(&output.ExtraKeysHCL, "type")
	err = helper.UnusedKeys(output)
	if err != nil {
		return nil, err
	}

	return output, nil
}