github.com/jfrazelle/docker@v1.1.2-0.20210712172922-bf78e25fe508/integration/service/create_test.go (about)

     1  package service // import "github.com/docker/docker/integration/service"
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"strings"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/docker/docker/api/types"
    12  	"github.com/docker/docker/api/types/filters"
    13  	"github.com/docker/docker/api/types/strslice"
    14  	swarmtypes "github.com/docker/docker/api/types/swarm"
    15  	"github.com/docker/docker/api/types/versions"
    16  	"github.com/docker/docker/client"
    17  	"github.com/docker/docker/errdefs"
    18  	"github.com/docker/docker/integration/internal/network"
    19  	"github.com/docker/docker/integration/internal/swarm"
    20  	"github.com/docker/docker/testutil/daemon"
    21  	"gotest.tools/v3/assert"
    22  	is "gotest.tools/v3/assert/cmp"
    23  	"gotest.tools/v3/poll"
    24  	"gotest.tools/v3/skip"
    25  )
    26  
    27  func TestServiceCreateInit(t *testing.T) {
    28  	defer setupTest(t)()
    29  	t.Run("daemonInitDisabled", testServiceCreateInit(false))
    30  	t.Run("daemonInitEnabled", testServiceCreateInit(true))
    31  }
    32  
    33  func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) {
    34  	return func(t *testing.T) {
    35  		var ops = []daemon.Option{}
    36  
    37  		if daemonEnabled {
    38  			ops = append(ops, daemon.WithInit())
    39  		}
    40  		d := swarm.NewSwarm(t, testEnv, ops...)
    41  		defer d.Stop(t)
    42  		client := d.NewClientT(t)
    43  		defer client.Close()
    44  
    45  		booleanTrue := true
    46  		booleanFalse := false
    47  
    48  		serviceID := swarm.CreateService(t, d)
    49  		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
    50  		i := inspectServiceContainer(t, client, serviceID)
    51  		// HostConfig.Init == nil means that it delegates to daemon configuration
    52  		assert.Check(t, i.HostConfig.Init == nil)
    53  
    54  		serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue))
    55  		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
    56  		i = inspectServiceContainer(t, client, serviceID)
    57  		assert.Check(t, is.Equal(true, *i.HostConfig.Init))
    58  
    59  		serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse))
    60  		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
    61  		i = inspectServiceContainer(t, client, serviceID)
    62  		assert.Check(t, is.Equal(false, *i.HostConfig.Init))
    63  	}
    64  }
    65  
    66  func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON {
    67  	t.Helper()
    68  	filter := filters.NewArgs()
    69  	filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID))
    70  	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter})
    71  	assert.NilError(t, err)
    72  	assert.Check(t, is.Len(containers, 1))
    73  
    74  	i, err := client.ContainerInspect(context.Background(), containers[0].ID)
    75  	assert.NilError(t, err)
    76  	return i
    77  }
    78  
    79  func TestCreateServiceMultipleTimes(t *testing.T) {
    80  	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
    81  	defer setupTest(t)()
    82  	d := swarm.NewSwarm(t, testEnv)
    83  	defer d.Stop(t)
    84  	client := d.NewClientT(t)
    85  	defer client.Close()
    86  	ctx := context.Background()
    87  
    88  	overlayName := "overlay1_" + t.Name()
    89  	overlayID := network.CreateNoError(ctx, t, client, overlayName,
    90  		network.WithCheckDuplicate(),
    91  		network.WithDriver("overlay"),
    92  	)
    93  
    94  	var instances uint64 = 4
    95  
    96  	serviceName := "TestService_" + t.Name()
    97  	serviceSpec := []swarm.ServiceSpecOpt{
    98  		swarm.ServiceWithReplicas(instances),
    99  		swarm.ServiceWithName(serviceName),
   100  		swarm.ServiceWithNetwork(overlayName),
   101  	}
   102  
   103  	serviceID := swarm.CreateService(t, d, serviceSpec...)
   104  	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
   105  
   106  	_, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
   107  	assert.NilError(t, err)
   108  
   109  	err = client.ServiceRemove(context.Background(), serviceID)
   110  	assert.NilError(t, err)
   111  
   112  	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)
   113  
   114  	serviceID2 := swarm.CreateService(t, d, serviceSpec...)
   115  	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll)
   116  
   117  	err = client.ServiceRemove(context.Background(), serviceID2)
   118  	assert.NilError(t, err)
   119  
   120  	// we can't just wait on no tasks for the service, counter-intuitively.
   121  	// Tasks may briefly exist but not show up, if they are are in the process
   122  	// of being deallocated. To avoid this case, we should retry network remove
   123  	// a few times, to give tasks time to be deallcoated
   124  	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll)
   125  
   126  	for retry := 0; retry < 5; retry++ {
   127  		err = client.NetworkRemove(context.Background(), overlayID)
   128  		// TODO(dperny): using strings.Contains for error checking is awful,
   129  		// but so is the fact that swarm functions don't return errdefs errors.
   130  		// I don't have time at this moment to fix the latter, so I guess I'll
   131  		// go with the former.
   132  		//
   133  		// The full error we're looking for is something like this:
   134  		//
   135  		// Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v
   136  		//
   137  		// The safest way to catch this, I think, will be to match on "is in
   138  		// use by", as this is an uninterrupted string that best identifies
   139  		// this error.
   140  		if err == nil || !strings.Contains(err.Error(), "is in use by") {
   141  			// if there is no error, or the error isn't this kind of error,
   142  			// then we'll break the loop body, and either fail the test or
   143  			// continue.
   144  			break
   145  		}
   146  	}
   147  	assert.NilError(t, err)
   148  
   149  	poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
   150  }
   151  
   152  func TestCreateServiceConflict(t *testing.T) {
   153  	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
   154  	defer setupTest(t)()
   155  	d := swarm.NewSwarm(t, testEnv)
   156  	defer d.Stop(t)
   157  	c := d.NewClientT(t)
   158  	defer c.Close()
   159  	ctx := context.Background()
   160  
   161  	serviceName := "TestService_" + t.Name()
   162  	serviceSpec := []swarm.ServiceSpecOpt{
   163  		swarm.ServiceWithName(serviceName),
   164  	}
   165  
   166  	swarm.CreateService(t, d, serviceSpec...)
   167  
   168  	spec := swarm.CreateServiceSpec(t, serviceSpec...)
   169  	_, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
   170  	assert.Check(t, errdefs.IsConflict(err))
   171  	assert.ErrorContains(t, err, "service "+serviceName+" already exists")
   172  }
   173  
   174  func TestCreateServiceMaxReplicas(t *testing.T) {
   175  	defer setupTest(t)()
   176  	d := swarm.NewSwarm(t, testEnv)
   177  	defer d.Stop(t)
   178  	client := d.NewClientT(t)
   179  	defer client.Close()
   180  
   181  	var maxReplicas uint64 = 2
   182  	serviceSpec := []swarm.ServiceSpecOpt{
   183  		swarm.ServiceWithReplicas(maxReplicas),
   184  		swarm.ServiceWithMaxReplicas(maxReplicas),
   185  	}
   186  
   187  	serviceID := swarm.CreateService(t, d, serviceSpec...)
   188  	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll)
   189  
   190  	_, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
   191  	assert.NilError(t, err)
   192  }
   193  
   194  func TestCreateWithDuplicateNetworkNames(t *testing.T) {
   195  	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
   196  	defer setupTest(t)()
   197  	d := swarm.NewSwarm(t, testEnv)
   198  	defer d.Stop(t)
   199  	client := d.NewClientT(t)
   200  	defer client.Close()
   201  	ctx := context.Background()
   202  
   203  	name := "foo_" + t.Name()
   204  	n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge"))
   205  	n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge"))
   206  
   207  	// Duplicates with name but with different driver
   208  	n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay"))
   209  
   210  	// Create Service with the same name
   211  	var instances uint64 = 1
   212  
   213  	serviceName := "top_" + t.Name()
   214  	serviceID := swarm.CreateService(t, d,
   215  		swarm.ServiceWithReplicas(instances),
   216  		swarm.ServiceWithName(serviceName),
   217  		swarm.ServiceWithNetwork(name),
   218  	)
   219  
   220  	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
   221  
   222  	resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
   223  	assert.NilError(t, err)
   224  	assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target))
   225  
   226  	// Remove Service, and wait for its tasks to be removed
   227  	err = client.ServiceRemove(ctx, serviceID)
   228  	assert.NilError(t, err)
   229  	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)
   230  
   231  	// Remove networks
   232  	err = client.NetworkRemove(context.Background(), n3)
   233  	assert.NilError(t, err)
   234  
   235  	err = client.NetworkRemove(context.Background(), n2)
   236  	assert.NilError(t, err)
   237  
   238  	err = client.NetworkRemove(context.Background(), n1)
   239  	assert.NilError(t, err)
   240  
   241  	// Make sure networks have been destroyed.
   242  	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
   243  	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
   244  	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
   245  }
   246  
// TestCreateServiceSecretFileMode creates a service with a secret mounted at
// /etc/secret with mode 0777, then reads the container's logs (the container
// runs `ls -l` on the target) to verify the mode was applied in the container.
func TestCreateServiceSecretFileMode(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()
	secretName := "TestSecret_" + t.Name()
	secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{
		Annotations: swarmtypes.Annotations{
			Name: secretName,
		},
		Data: []byte("TESTSECRET"),
	})
	assert.NilError(t, err)

	var instances uint64 = 1
	serviceName := "TestService_" + t.Name()
	// The command lists the secret target (so its mode shows up in the logs),
	// then falls back to top to keep the task running.
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}),
		swarm.ServiceWithSecret(&swarmtypes.SecretReference{
			File: &swarmtypes.SecretReferenceFileTarget{
				Name: "/etc/secret",
				UID:  "0",
				GID:  "0",
				Mode: 0777,
			},
			SecretID:   secretResp.ID,
			SecretName: secretName,
		}),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)

	// Find the service's single task so we can read its container's logs.
	filter := filters.NewArgs()
	filter.Add("service", serviceID)
	tasks, err := client.TaskList(ctx, types.TaskListOptions{
		Filters: filter,
	})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(len(tasks), 1))

	body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{
		ShowStdout: true,
	})
	assert.NilError(t, err)
	defer body.Close()

	// Mode 0777 must render as -rwxrwxrwx in the `ls -l` output.
	content, err := ioutil.ReadAll(body)
	assert.NilError(t, err)
	assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))

	// Cleanup: remove the service (waiting for its tasks) and the secret.
	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	err = client.SecretRemove(ctx, secretName)
	assert.NilError(t, err)
}
   310  
   311  func TestCreateServiceConfigFileMode(t *testing.T) {
   312  	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
   313  	defer setupTest(t)()
   314  	d := swarm.NewSwarm(t, testEnv)
   315  	defer d.Stop(t)
   316  	client := d.NewClientT(t)
   317  	defer client.Close()
   318  
   319  	ctx := context.Background()
   320  	configName := "TestConfig_" + t.Name()
   321  	configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{
   322  		Annotations: swarmtypes.Annotations{
   323  			Name: configName,
   324  		},
   325  		Data: []byte("TESTCONFIG"),
   326  	})
   327  	assert.NilError(t, err)
   328  
   329  	var instances uint64 = 1
   330  	serviceName := "TestService_" + t.Name()
   331  	serviceID := swarm.CreateService(t, d,
   332  		swarm.ServiceWithName(serviceName),
   333  		swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}),
   334  		swarm.ServiceWithReplicas(instances),
   335  		swarm.ServiceWithConfig(&swarmtypes.ConfigReference{
   336  			File: &swarmtypes.ConfigReferenceFileTarget{
   337  				Name: "/etc/config",
   338  				UID:  "0",
   339  				GID:  "0",
   340  				Mode: 0777,
   341  			},
   342  			ConfigID:   configResp.ID,
   343  			ConfigName: configName,
   344  		}),
   345  	)
   346  
   347  	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))
   348  
   349  	filter := filters.NewArgs()
   350  	filter.Add("service", serviceID)
   351  	tasks, err := client.TaskList(ctx, types.TaskListOptions{
   352  		Filters: filter,
   353  	})
   354  	assert.NilError(t, err)
   355  	assert.Check(t, is.Equal(len(tasks), 1))
   356  
   357  	body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{
   358  		ShowStdout: true,
   359  	})
   360  	assert.NilError(t, err)
   361  	defer body.Close()
   362  
   363  	content, err := ioutil.ReadAll(body)
   364  	assert.NilError(t, err)
   365  	assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))
   366  
   367  	err = client.ServiceRemove(ctx, serviceID)
   368  	assert.NilError(t, err)
   369  	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID))
   370  
   371  	err = client.ConfigRemove(ctx, configName)
   372  	assert.NilError(t, err)
   373  }
   374  
   375  // TestServiceCreateSysctls tests that a service created with sysctl options in
   376  // the ContainerSpec correctly applies those options.
   377  //
   378  // To test this, we're going to create a service with the sysctl option
   379  //
   380  //   {"net.ipv4.ip_nonlocal_bind": "0"}
   381  //
   382  // We'll get the service's tasks to get the container ID, and then we'll
   383  // inspect the container. If the output of the container inspect contains the
   384  // sysctl option with the correct value, we can assume that the sysctl has been
   385  // plumbed correctly.
   386  //
   387  // Next, we'll remove that service and create a new service with that option
   388  // set to 1. This means that no matter what the default is, we can be confident
   389  // that the sysctl option is applying as intended.
   390  //
   391  // Additionally, we'll do service and task inspects to verify that the inspect
   392  // output includes the desired sysctl option.
   393  //
   394  // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly
   395  // confident won't be modified by the container runtime, and won't blow
   396  // anything up in the test environment
   397  func TestCreateServiceSysctls(t *testing.T) {
   398  	skip.If(
   399  		t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"),
   400  		"setting service sysctls is unsupported before api v1.40",
   401  	)
   402  
   403  	defer setupTest(t)()
   404  	d := swarm.NewSwarm(t, testEnv)
   405  	defer d.Stop(t)
   406  	client := d.NewClientT(t)
   407  	defer client.Close()
   408  
   409  	ctx := context.Background()
   410  
   411  	// run thie block twice, so that no matter what the default value of
   412  	// net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl
   413  	// options works
   414  	for _, expected := range []string{"0", "1"} {
   415  
   416  		// store the map we're going to be using everywhere.
   417  		expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected}
   418  
   419  		// Create the service with the sysctl options
   420  		var instances uint64 = 1
   421  		serviceID := swarm.CreateService(t, d,
   422  			swarm.ServiceWithSysctls(expectedSysctls),
   423  		)
   424  
   425  		// wait for the service to converge to 1 running task as expected
   426  		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))
   427  
   428  		// we're going to check 3 things:
   429  		//
   430  		//   1. Does the container, when inspected, have the sysctl option set?
   431  		//   2. Does the task have the sysctl in the spec?
   432  		//   3. Does the service have the sysctl in the spec?
   433  		//
   434  		// if all 3 of these things are true, we know that the sysctl has been
   435  		// plumbed correctly through the engine.
   436  		//
   437  		// We don't actually have to get inside the container and check its
   438  		// logs or anything. If we see the sysctl set on the container inspect,
   439  		// we know that the sysctl is plumbed correctly. everything below that
   440  		// level has been tested elsewhere. (thanks @thaJeztah, because an
   441  		// earlier version of this test had to get container logs and was much
   442  		// more complex)
   443  
   444  		// get all of the tasks of the service, so we can get the container
   445  		filter := filters.NewArgs()
   446  		filter.Add("service", serviceID)
   447  		tasks, err := client.TaskList(ctx, types.TaskListOptions{
   448  			Filters: filter,
   449  		})
   450  		assert.NilError(t, err)
   451  		assert.Check(t, is.Equal(len(tasks), 1))
   452  
   453  		// verify that the container has the sysctl option set
   454  		ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
   455  		assert.NilError(t, err)
   456  		assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls)
   457  
   458  		// verify that the task has the sysctl option set in the task object
   459  		assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls)
   460  
   461  		// verify that the service also has the sysctl set in the spec.
   462  		service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
   463  		assert.NilError(t, err)
   464  		assert.DeepEqual(t,
   465  			service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls,
   466  		)
   467  	}
   468  }
   469  
   470  // TestServiceCreateCapabilities tests that a service created with capabilities options in
   471  // the ContainerSpec correctly applies those options.
   472  //
   473  // To test this, we're going to create a service with the capabilities option
   474  //
   475  //   []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"}
   476  //
   477  // We'll get the service's tasks to get the container ID, and then we'll
   478  // inspect the container. If the output of the container inspect contains the
   479  // capabilities option with the correct value, we can assume that the capabilities has been
   480  // plumbed correctly.
   481  func TestCreateServiceCapabilities(t *testing.T) {
   482  	skip.If(
   483  		t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"),
   484  		"setting service capabilities is unsupported before api v1.41",
   485  	)
   486  
   487  	defer setupTest(t)()
   488  	d := swarm.NewSwarm(t, testEnv)
   489  	defer d.Stop(t)
   490  	client := d.NewClientT(t)
   491  	defer client.Close()
   492  
   493  	ctx := context.Background()
   494  
   495  	// store the map we're going to be using everywhere.
   496  	capAdd := []string{"CAP_SYS_CHROOT"}
   497  	capDrop := []string{"CAP_NET_RAW"}
   498  
   499  	// Create the service with the capabilities options
   500  	var instances uint64 = 1
   501  	serviceID := swarm.CreateService(t, d,
   502  		swarm.ServiceWithCapabilities(capAdd, capDrop),
   503  	)
   504  
   505  	// wait for the service to converge to 1 running task as expected
   506  	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))
   507  
   508  	// we're going to check 3 things:
   509  	//
   510  	//   1. Does the container, when inspected, have the capabilities option set?
   511  	//   2. Does the task have the capabilities in the spec?
   512  	//   3. Does the service have the capabilities in the spec?
   513  	//
   514  	// if all 3 of these things are true, we know that the capabilities has been
   515  	// plumbed correctly through the engine.
   516  	//
   517  	// We don't actually have to get inside the container and check its
   518  	// logs or anything. If we see the capabilities set on the container inspect,
   519  	// we know that the capabilities is plumbed correctly. everything below that
   520  	// level has been tested elsewhere.
   521  
   522  	// get all of the tasks of the service, so we can get the container
   523  	filter := filters.NewArgs()
   524  	filter.Add("service", serviceID)
   525  	tasks, err := client.TaskList(ctx, types.TaskListOptions{
   526  		Filters: filter,
   527  	})
   528  	assert.NilError(t, err)
   529  	assert.Check(t, is.Equal(len(tasks), 1))
   530  
   531  	// verify that the container has the capabilities option set
   532  	ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
   533  	assert.NilError(t, err)
   534  	assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd))
   535  	assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop))
   536  
   537  	// verify that the task has the capabilities option set in the task object
   538  	assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd)
   539  	assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop)
   540  
   541  	// verify that the service also has the capabilities set in the spec.
   542  	service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
   543  	assert.NilError(t, err)
   544  	assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd)
   545  	assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop)
   546  }