github.com/bigcommerce/nomad@v0.9.3-bc/drivers/rkt/driver_test.go

package rkt

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	ctestutil "github.com/hashicorp/nomad/client/testutil"
	"github.com/hashicorp/nomad/helper/pluginutils/hclutils"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/testtask"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs"
	basePlug "github.com/hashicorp/nomad/plugins/base"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
	"golang.org/x/sys/unix"
)

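// Compile-time check that Driver satisfies the drivers.DriverPlugin interface.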
var _ drivers.DriverPlugin = (*Driver)(nil)

func TestRktVersionRegex(t *testing.T) {
	ctestutil.RktCompatible(t)
	t.Parallel()

	inputRkt := "rkt version 0.8.1"
	inputAppc := "appc version 1.2.0"
	expectedRkt := "0.8.1"
	expectedAppc := "1.2.0"
	rktMatches := reRktVersion.FindStringSubmatch(inputRkt)
	appcMatches := reAppcVersion.FindStringSubmatch(inputAppc)
	if rktMatches[1] != expectedRkt {
		t.Errorf("rkt version mismatch: got %q; want %q", rktMatches[1], expectedRkt)
	}
	if appcMatches[1] != expectedAppc {
		t.Errorf("appc version mismatch: got %q; want %q", appcMatches[1], expectedAppc)
	}
}

// Tests setting driver config options
func TestRktDriver_SetConfig(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	// Enable Volumes
	config := &Config{
		VolumesEnabled: true,
	}
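	// In client HCL this corresponds roughly to the following plugin stanza
	// (a sketch; the attribute name is assumed from the Config codec tags):
	//
	//	plugin "rkt" {
	//	  config {
	//	    volumes_enabled = true
	//	  }
	//	}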

	var data []byte
	require.NoError(basePlug.MsgPackEncode(&data, config))
	bconfig := &basePlug.Config{PluginConfig: data}
	require.NoError(harness.SetConfig(bconfig))
	require.Exactly(config, d.(*Driver).config)

	config.VolumesEnabled = false
	data = []byte{}
	require.NoError(basePlug.MsgPackEncode(&data, config))
	bconfig = &basePlug.Config{PluginConfig: data}
	require.NoError(harness.SetConfig(bconfig))
	require.Exactly(config, d.(*Driver).config)
}

// Verifies using a trust prefix and passing DNS servers and search domains.
// Also verifies that sending SIGTERM correctly stops the driver instance.
func TestRktDriver_Start_Wait_Stop_DNS(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "etcd",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		TrustPrefix:      "coreos.com/etcd",
		ImageName:        "coreos.com/etcd:v2.0.4",
		Command:          "/etcd",
		DNSServers:       []string{"8.8.8.8", "8.8.4.4"},
		DNSSearchDomains: []string{"example.com", "example.org", "example.net"},
		Net:              []string{"host"},
	}
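	// For reference, this roughly mirrors the rkt CLI shape (a sketch, not
	// the driver's exact invocation):
	//   rkt trust --prefix=coreos.com/etcd
	//   rkt run coreos.com/etcd:v2.0.4 --exec=/etcd --net=host \
	//     --dns=8.8.8.8 --dns=8.8.4.4 --dns-search=example.com ...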
	require.NoError(task.EncodeConcreteDriverConfig(&tc))
	testtask.SetTaskConfigEnv(task)
	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	handle, driverNet, err := harness.StartTask(task)
	require.NoError(err)
	require.Nil(driverNet)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)

	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	go func() {
		harness.StopTask(task.ID, 2*time.Second, "SIGTERM")
	}()

	select {
	case result := <-ch:
		require.Equal(int(unix.SIGTERM), result.Signal)
	case <-time.After(10 * time.Second):
		require.Fail("timeout waiting for the task to shut down")
	}

	// Ensure that the task is marked as dead, but account
	// for WaitTask() closing the channel before internal state is updated
	testutil.WaitForResult(func() (bool, error) {
		status, err := harness.InspectTask(task.ID)
		if err != nil {
			return false, fmt.Errorf("inspecting task failed: %v", err)
		}
		if status.State != drivers.TaskStateExited {
			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

// Verifies waiting on a task to exit cleanly
func TestRktDriver_Start_Wait_Stop(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "etcd",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		TrustPrefix: "coreos.com/etcd",
		ImageName:   "coreos.com/etcd:v2.0.4",
		Command:     "/etcd",
		Args:        []string{"--version"},
		Net:         []string{"none"},
		Debug:       true,
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))
	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	// Wait on the task; it should exit quickly since we only ask for the etcd version
	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)
	result := <-ch
	require.Nil(result.Err)
	require.Zero(result.ExitCode)

	require.NoError(harness.DestroyTask(task.ID, true))
}

// Verifies that skipping trust_prefix works
func TestRktDriver_Start_Wait_Skip_Trust(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "etcd",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		ImageName: "coreos.com/etcd:v2.0.4",
		Command:   "/etcd",
		Args:      []string{"--version"},
		Net:       []string{"none"},
		Debug:     true,
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))
	testtask.SetTaskConfigEnv(task)

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	// Wait on the task; it should exit quickly since we only ask for the etcd version
	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)
	result := <-ch
	require.Nil(result.Err)
	require.Zero(result.ExitCode)

	require.NoError(harness.DestroyTask(task.ID, true))
}

// Verifies that an invalid trust prefix returns the expected error
func TestRktDriver_InvalidTrustPrefix(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "etcd",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		TrustPrefix: "example.com/invalid",
		ImageName:   "coreos.com/etcd:v2.0.4",
		Command:     "/etcd",
		Args:        []string{"--version"},
		Net:         []string{"none"},
		Debug:       true,
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))
	testtask.SetTaskConfigEnv(task)

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	_, _, err := harness.StartTask(task)
	require.Error(err)
	expectedErr := "Error running rkt trust"
	require.Contains(err.Error(), expectedErr)
}

// Verifies reattaching to a running container.
// This test manipulates the driver's internal task store
// to drop the task, then recovers it via RecoverTask.
func TestRktDriver_StartWaitRecoverWaitStop(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "etcd",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		ImageName: "coreos.com/etcd:v2.0.4",
		Command:   "/etcd",
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	ch, err := harness.WaitTask(context.Background(), task.ID)
	require.NoError(err)

	var waitDone bool
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		result := <-ch
		require.Error(result.Err)
		waitDone = true
	}()

	originalStatus, err := d.InspectTask(task.ID)
	require.NoError(err)

	d.(*Driver).tasks.Delete(task.ID)

	wg.Wait()
	require.True(waitDone)
	_, err = d.InspectTask(task.ID)
	require.Equal(drivers.ErrTaskNotFound, err)

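	// RecoverTask rebuilds the in-memory handle from the persisted
	// TaskHandle, reattaching to the still-running pod.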
	err = d.RecoverTask(handle)
	require.NoError(err)

	status, err := d.InspectTask(task.ID)
	require.NoError(err)
	require.Exactly(originalStatus, status)

	ch, err = harness.WaitTask(context.Background(), task.ID)
	require.NoError(err)

	require.NoError(d.StopTask(task.ID, 0, "SIGKILL"))

	select {
	case result := <-ch:
		require.NoError(result.Err)
		require.NotZero(result.ExitCode)

		// when killing a task, the signal might not propagate
		// when the executor's proc.Wait() call gets a "wait: no child processes" error
		//require.Equal(9, result.Signal)
	case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
		require.Fail("WaitTask timeout")
	}

	require.NoError(d.DestroyTask(task.ID, false))
}

// Verifies mounting a volume from the host machine and writing
// some data to it from inside the container
func TestRktDriver_Start_Wait_Volume(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	// enable volumes
	config := &Config{VolumesEnabled: true}

	var data []byte
	require.NoError(basePlug.MsgPackEncode(&data, config))
	bconfig := &basePlug.Config{PluginConfig: data}
	require.NoError(harness.SetConfig(bconfig))

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "rkttest_alpine",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}
	exp := []byte{'w', 'i', 'n'}
	file := "output.txt"
	tmpvol, err := ioutil.TempDir("", "nomadtest_rktdriver_volumes")
	require.NoError(err)
	defer os.RemoveAll(tmpvol)
	hostpath := filepath.Join(tmpvol, file)

	tc := &TaskConfig{
		ImageName: "docker://redis:3.2-alpine",
		Command:   "/bin/sh",
		Args: []string{
			"-c",
			fmt.Sprintf("echo -n %s > /foo/%s", string(exp), file),
		},
		Net:     []string{"none"},
		Volumes: []string{fmt.Sprintf("%s:/foo", tmpvol)},
	}
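	// Volume entries use the "host-path:container-path" form; with volumes
	// enabled the driver translates each one into a rkt host volume and a
	// matching mount (e.g. --volume foo,kind=host,source=<tmpvol>
	// --mount volume=foo,target=/foo).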
	require.NoError(task.EncodeConcreteDriverConfig(&tc))

	testtask.SetTaskConfigEnv(task)

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	_, _, err = harness.StartTask(task)
	require.NoError(err)

	// Task should terminate quickly
	waitCh, err := harness.WaitTask(context.Background(), task.ID)
	require.NoError(err)

	select {
	case res := <-waitCh:
		require.NoError(res.Err)
		require.True(res.Successful(), fmt.Sprintf("exit code %v", res.ExitCode))
	case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
		require.Fail("WaitTask timeout")
	}

	// Check that the data was written to the mounted host volume.
	act, err := ioutil.ReadFile(hostpath)
	require.NoError(err)
	require.Exactly(exp, act)
	require.NoError(harness.DestroyTask(task.ID, true))
}

// Verifies mounting a task mount from the host machine and writing
// some data to it from inside the container
func TestRktDriver_Start_Wait_TaskMounts(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	// mounts through task config should be enabled regardless
	config := &Config{VolumesEnabled: false}

	var data []byte
	require.NoError(basePlug.MsgPackEncode(&data, config))
	bconfig := &basePlug.Config{PluginConfig: data}
	require.NoError(harness.SetConfig(bconfig))

	tmpvol, err := ioutil.TempDir("", "nomadtest_rktdriver_volumes")
	require.NoError(err)
	defer os.RemoveAll(tmpvol)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "rkttest_alpine",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
		Mounts: []*drivers.MountConfig{
			{HostPath: tmpvol, TaskPath: "/foo", Readonly: false},
		},
	}
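	// Unlike the driver's volumes option, Mounts are supplied by Nomad itself
	// and are honored even though volumes_enabled is false above.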
	exp := []byte{'w', 'i', 'n'}
	file := "output.txt"
	hostpath := filepath.Join(tmpvol, file)

	tc := &TaskConfig{
		ImageName: "docker://redis:3.2-alpine",
		Command:   "/bin/sh",
		Args: []string{
			"-c",
			fmt.Sprintf("echo -n %s > /foo/%s", string(exp), file),
		},
		Net: []string{"none"},
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))
	testtask.SetTaskConfigEnv(task)

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	_, _, err = harness.StartTask(task)
	require.NoError(err)

	// Task should terminate quickly
	waitCh, err := harness.WaitTask(context.Background(), task.ID)
	require.NoError(err)

	select {
	case res := <-waitCh:
		require.NoError(res.Err)
		require.True(res.Successful(), fmt.Sprintf("exit code %v", res.ExitCode))
	case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
		require.Fail("WaitTask timeout")
	}

	// Check that the data was written to the mounted host directory.
	act, err := ioutil.ReadFile(hostpath)
	require.NoError(err)
	require.Exactly(exp, act)
	require.NoError(harness.DestroyTask(task.ID, true))
}

// Verifies port mapping
func TestRktDriver_PortMapping(t *testing.T) {
	ctestutil.RktCompatible(t)

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "redis",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
				Networks: []*structs.NetworkResource{
					{
						IP:            "127.0.0.1",
						ReservedPorts: []structs.Port{{Label: "main", Value: 8080}},
					},
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		ImageName: "docker://redis:3.2-alpine",
		PortMap: map[string]string{
			"main": "6379-tcp",
		},
		Debug: true,
	}
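	// PortMap keys are Nomad network labels ("main", reserved above as host
	// port 8080); values name the container port from the image manifest
	// ("6379-tcp" for this redis image). The driver wires the two together.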
	require.NoError(task.EncodeConcreteDriverConfig(&tc))

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	_, driverNetwork, err := harness.StartTask(task)
	require.NoError(err)
	require.NotNil(driverNetwork)
	require.NoError(harness.DestroyTask(task.ID, true))
}

// This test starts a redis container, setting user and group.
// It verifies that running ps inside the container shows the expected user and group
func TestRktDriver_UserGroup(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		User:    "nobody",
		Name:    "rkttest_alpine",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		ImageName: "docker://redis:3.2-alpine",
		Group:     "nogroup",
		Command:   "sleep",
		Args:      []string{"9000"},
		Net:       []string{"none"},
	}
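	// "nobody" and "nogroup" are resolved by name, so both are expected to
	// exist in the image's /etc/passwd and /etc/group (they do in alpine).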
	require.NoError(task.EncodeConcreteDriverConfig(&tc))

	testtask.SetTaskConfigEnv(task)

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	_, _, err := harness.StartTask(task)
	require.NoError(err)

	expected := []byte("\nnobody   nogroup  /bin/sleep 9000\n")
	testutil.WaitForResult(func() (bool, error) {
		res, err := d.ExecTask(task.ID, []string{"ps", "-o", "user,group,args"}, time.Second)
		if err != nil {
			return false, fmt.Errorf("failed to exec: %#v", err)
		}
		if !res.ExitResult.Successful() {
			return false, fmt.Errorf("ps failed: %#v %#v", res.ExitResult, res)
		}
		raw := res.Stdout
		return bytes.Contains(raw, expected), fmt.Errorf("expected %q but found:\n%s", expected, raw)
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

// Verifies executing both correct and incorrect commands inside the container
func TestRktDriver_Exec(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "etcd",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		TrustPrefix: "coreos.com/etcd",
		ImageName:   "coreos.com/etcd:v2.0.4",
		Net:         []string{"none"},
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))
	testtask.SetTaskConfigEnv(task)

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	_, _, err := harness.StartTask(task)
	require.NoError(err)

	// Run a command that should succeed
	expected := []byte("etcd version")
	testutil.WaitForResult(func() (bool, error) {
		res, err := d.ExecTask(task.ID, []string{"/etcd", "--version"}, time.Second)
		if err != nil {
			return false, fmt.Errorf("failed to exec: %#v", err)
		}
		if !res.ExitResult.Successful() {
			return false, fmt.Errorf("/etcd --version failed: %#v %#v", res.ExitResult, res)
		}
		raw := res.Stdout
		return bytes.Contains(raw, expected), fmt.Errorf("expected %q but found:\n%s", expected, raw)
	}, func(err error) {
		require.NoError(err)
	})

	// Run a command that should fail
	expected = []byte("flag provided but not defined")
	testutil.WaitForResult(func() (bool, error) {
		res, err := d.ExecTask(task.ID, []string{"/etcd", "--cgdfgdfg"}, time.Second)
		if err != nil {
			return false, fmt.Errorf("failed to exec: %#v", err)
		}
		if res.ExitResult.Successful() {
			return false, fmt.Errorf("/etcd --cgdfgdfg unexpectedly succeeded: %#v %#v", res.ExitResult, res)
		}
		raw := res.Stdout
		return bytes.Contains(raw, expected), fmt.Errorf("expected %q but found:\n%s", expected, raw)
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

// Verifies getting resource usage stats
// TODO(preetha) figure out why stats are zero
func TestRktDriver_Stats(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "etcd",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		TrustPrefix: "coreos.com/etcd",
		ImageName:   "coreos.com/etcd:v2.0.4",
		Command:     "/etcd",
		Net:         []string{"none"},
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))
	testtask.SetTaskConfigEnv(task)

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	// Acquire the wait channel
	_, err = harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)

	// Wait until the task is running
	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	statsCh, err := d.TaskStats(ctx, task.ID, time.Second*10)
	require.NoError(err)

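	// TaskStats streams resource-usage snapshots on statsCh; the test only
	// checks that a first snapshot arrives promptly.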
	select {
	case ru := <-statsCh:
		//TODO(preetha) why are these zero
		fmt.Printf("pid map %v\n", ru.Pids)
		fmt.Printf("CPU:%+v Memory:%+v", ru.ResourceUsage.CpuStats, ru.ResourceUsage.MemoryStats)
	case <-time.After(time.Second):
		require.Fail("timeout receiving stats from channel")
	}

	require.NoError(harness.DestroyTask(task.ID, true))
}

func TestConfig_ParseAllHCL(t *testing.T) {
	cfgStr := `
config {
  image = "docker://redis:3.2"
  command = "/bin/bash"
  args = ["-c", "echo hi"]
  trust_prefix = "coreos.com/etcd"
  dns_servers = ["8.8.8.8"]
  dns_search_domains = ["example.com"]
  net = ["network1"]
  port_map {
    http = "80-tcp"
    https = "443-tcp"
  }
  volumes = [
    "/host-path:/container-path",
  ]
  insecure_options = ["image", "tls", "ondisk"]
  no_overlay = true
  debug = true
  group = "mygroup"
}`

	expected := &TaskConfig{
		ImageName:        "docker://redis:3.2",
		Command:          "/bin/bash",
		Args:             []string{"-c", "echo hi"},
		TrustPrefix:      "coreos.com/etcd",
		DNSServers:       []string{"8.8.8.8"},
		DNSSearchDomains: []string{"example.com"},
		Net:              []string{"network1"},
		PortMap: map[string]string{
			"http":  "80-tcp",
			"https": "443-tcp",
		},
		Volumes: []string{
			"/host-path:/container-path",
		},
		InsecureOptions: []string{"image", "tls", "ondisk"},
		NoOverlay:       true,
		Debug:           true,
		Group:           "mygroup",
	}

	var tc *TaskConfig
	hclutils.NewConfigParser(taskConfigSpec).ParseHCL(t, cfgStr, &tc)

	require.EqualValues(t, expected, tc)
}

func TestRkt_ExecTaskStreaming(t *testing.T) {
	ctestutil.RktCompatible(t)
	if !testutil.IsCI() {
		t.Parallel()
	}

	require := require.New(t)
	d := NewRktDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		AllocID: uuid.Generate(),
		Name:    "etcd",
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 128,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 100,
				},
			},
			LinuxResources: &drivers.LinuxResources{
				MemoryLimitBytes: 134217728,
				CPUShares:        100,
			},
		},
	}

	tc := &TaskConfig{
		ImageName: "docker://busybox:1.29.3",
		Command:   "/bin/sleep",
		Args:      []string{"1000"},
		Net:       []string{"none"},
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))
	testtask.SetTaskConfigEnv(task)

	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	_, _, err := harness.StartTask(task)
	require.NoError(err)
	defer d.DestroyTask(task.ID, true)

	// wait for container to be up and executable
	testutil.WaitForResult(func() (bool, error) {
		res, err := d.ExecTask(task.ID, []string{"/bin/sh", "-c", "echo hi"}, time.Second)
		if err != nil {
			return false, fmt.Errorf("failed to exec: %#v", err)
		}
		if !res.ExitResult.Successful() {
			return false, fmt.Errorf("echo failed: %#v %#v", res.ExitResult, res)
		}
		return true, nil
	}, func(err error) {
		require.NoError(err)
	})

	dtestutil.ExecTaskStreamingConformanceTests(t, harness, task.ID)
}