github.com/rish1988/moby@v25.0.2+incompatible/integration/service/create_test.go

package service // import "github.com/docker/docker/integration/service"

import (
	"context"
	"io"
	"strings"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/strslice"
	swarmtypes "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/integration/internal/network"
	"github.com/docker/docker/integration/internal/swarm"
	"github.com/docker/docker/testutil"
	"github.com/docker/docker/testutil/daemon"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/poll"
	"gotest.tools/v3/skip"
)

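// TestServiceCreateInit checks that the init setting in a service's
// ContainerSpec is applied to the containers it creates: when left unset the
// daemon default is used, and an explicit true/false overrides it.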
func TestServiceCreateInit(t *testing.T) {
	ctx := setupTest(t)
	t.Run("daemonInitDisabled", testServiceCreateInit(ctx, false))
	t.Run("daemonInitEnabled", testServiceCreateInit(ctx, true))
}

func testServiceCreateInit(ctx context.Context, daemonEnabled bool) func(t *testing.T) {
	return func(t *testing.T) {
		_ = testutil.StartSpan(ctx, t)
		ops := []daemon.Option{}

		if daemonEnabled {
			ops = append(ops, daemon.WithInit())
		}
		d := swarm.NewSwarm(ctx, t, testEnv, ops...)
		defer d.Stop(t)
		client := d.NewClientT(t)
		defer client.Close()

		booleanTrue := true
		booleanFalse := false

		serviceID := swarm.CreateService(ctx, t, d)
		poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, 1), swarm.ServicePoll)
		i := inspectServiceContainer(ctx, t, client, serviceID)
		// HostConfig.Init == nil means that it delegates to daemon configuration
		assert.Check(t, i.HostConfig.Init == nil)

		serviceID = swarm.CreateService(ctx, t, d, swarm.ServiceWithInit(&booleanTrue))
		poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, 1), swarm.ServicePoll)
		i = inspectServiceContainer(ctx, t, client, serviceID)
		assert.Check(t, is.Equal(true, *i.HostConfig.Init))

		serviceID = swarm.CreateService(ctx, t, d, swarm.ServiceWithInit(&booleanFalse))
		poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, 1), swarm.ServicePoll)
		i = inspectServiceContainer(ctx, t, client, serviceID)
		assert.Check(t, is.Equal(false, *i.HostConfig.Init))
	}
}

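// inspectServiceContainer returns the inspect output of the single container
// backing the given service. It fails the test if the service does not have
// exactly one container.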
func inspectServiceContainer(ctx context.Context, t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON {
	t.Helper()
	containers, err := client.ContainerList(ctx, container.ListOptions{
		Filters: filters.NewArgs(filters.Arg("label", "com.docker.swarm.service.id="+serviceID)),
	})
	assert.NilError(t, err)
	assert.Check(t, is.Len(containers, 1))

	i, err := client.ContainerInspect(ctx, containers[0].ID)
	assert.NilError(t, err)
	return i
}

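// TestCreateServiceMultipleTimes creates and removes the same service (and
// its overlay network) twice, verifying that the service name can be reused
// and that the network can be removed once the tasks have been deallocated.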
func TestCreateServiceMultipleTimes(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	ctx := setupTest(t)

	d := swarm.NewSwarm(ctx, t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	overlayName := "overlay1_" + t.Name()
	overlayID := network.CreateNoError(ctx, t, client, overlayName,
		network.WithDriver("overlay"),
	)

	var instances uint64 = 4

	serviceName := "TestService_" + t.Name()
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithNetwork(overlayName),
	}

	serviceID := swarm.CreateService(ctx, t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, instances), swarm.ServicePoll)

	_, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)

	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)

	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	serviceID2 := swarm.CreateService(ctx, t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID2, instances), swarm.ServicePoll)

	err = client.ServiceRemove(ctx, serviceID2)
	assert.NilError(t, err)

	// Counter-intuitively, we can't just wait for the service to have no
	// tasks. Tasks may briefly exist but not show up if they are in the
	// process of being deallocated. To avoid this case, we retry the network
	// removal a few times, to give tasks time to be deallocated.
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll)

	for retry := 0; retry < 5; retry++ {
		err = client.NetworkRemove(ctx, overlayID)
		// TODO(dperny): using strings.Contains for error checking is awful,
		// but so is the fact that swarm functions don't return errdefs errors.
		// I don't have time at this moment to fix the latter, so I guess I'll
		// go with the former.
		//
		// The full error we're looking for is something like this:
		//
		// Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v
		//
		// The safest way to catch this, I think, will be to match on "is in
		// use by", as this is an uninterrupted string that best identifies
		// this error.
		if err == nil || !strings.Contains(err.Error(), "is in use by") {
			// if there is no error, or the error isn't this kind of error,
			// then we break out of the loop, and either continue or fail the
			// test below.
			break
		}
	}
	assert.NilError(t, err)

	poll.WaitOn(t, network.IsRemoved(ctx, client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
}

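// TestCreateServiceConflict verifies that creating a second service with an
// already-used name fails with a conflict error.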
func TestCreateServiceConflict(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	ctx := setupTest(t)

	d := swarm.NewSwarm(ctx, t, testEnv)
	defer d.Stop(t)
	c := d.NewClientT(t)
	defer c.Close()

	serviceName := "TestService_" + t.Name()
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithName(serviceName),
	}

	swarm.CreateService(ctx, t, d, serviceSpec...)

	spec := swarm.CreateServiceSpec(t, serviceSpec...)
	_, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	assert.Check(t, errdefs.IsConflict(err))
	assert.ErrorContains(t, err, "service "+serviceName+" already exists")
}

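// TestCreateServiceMaxReplicas creates a service with a maximum-replicas
// limit equal to the requested replica count and verifies that all tasks
// reach the running state.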
func TestCreateServiceMaxReplicas(t *testing.T) {
	ctx := setupTest(t)

	d := swarm.NewSwarm(ctx, t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	var maxReplicas uint64 = 2
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithReplicas(maxReplicas),
		swarm.ServiceWithMaxReplicas(maxReplicas),
	}

	serviceID := swarm.CreateService(ctx, t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, maxReplicas), swarm.ServicePoll)

	_, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)
}

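// TestCreateServiceSecretFileMode creates a service with a secret mounted
// with mode 0o777 and checks, via the service logs, that the file inside the
// container has those permissions.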
func TestCreateServiceSecretFileMode(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	ctx := setupTest(t)

	d := swarm.NewSwarm(ctx, t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	secretName := "TestSecret_" + t.Name()
	secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{
		Annotations: swarmtypes.Annotations{
			Name: secretName,
		},
		Data: []byte("TESTSECRET"),
	})
	assert.NilError(t, err)

	var instances uint64 = 1
	serviceName := "TestService_" + t.Name()
	serviceID := swarm.CreateService(ctx, t, d,
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}),
		swarm.ServiceWithSecret(&swarmtypes.SecretReference{
			File: &swarmtypes.SecretReferenceFileTarget{
				Name: "/etc/secret",
				UID:  "0",
				GID:  "0",
				Mode: 0o777,
			},
			SecretID:   secretResp.ID,
			SecretName: secretName,
		}),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, instances), swarm.ServicePoll)

	body, err := client.ServiceLogs(ctx, serviceID, container.LogsOptions{
		Tail:       "1",
		ShowStdout: true,
	})
	assert.NilError(t, err)
	defer body.Close()

	content, err := io.ReadAll(body)
	assert.NilError(t, err)
	assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))

	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	err = client.SecretRemove(ctx, secretName)
	assert.NilError(t, err)
}

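// TestCreateServiceConfigFileMode creates a service with a config mounted
// with mode 0o777 and checks, via the service logs, that the file inside the
// container has those permissions.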
func TestCreateServiceConfigFileMode(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	ctx := setupTest(t)

	d := swarm.NewSwarm(ctx, t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	configName := "TestConfig_" + t.Name()
	configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{
		Annotations: swarmtypes.Annotations{
			Name: configName,
		},
		Data: []byte("TESTCONFIG"),
	})
	assert.NilError(t, err)

	var instances uint64 = 1
	serviceName := "TestService_" + t.Name()
	serviceID := swarm.CreateService(ctx, t, d,
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}),
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithConfig(&swarmtypes.ConfigReference{
			File: &swarmtypes.ConfigReferenceFileTarget{
				Name: "/etc/config",
				UID:  "0",
				GID:  "0",
				Mode: 0o777,
			},
			ConfigID:   configResp.ID,
			ConfigName: configName,
		}),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, instances))

	body, err := client.ServiceLogs(ctx, serviceID, container.LogsOptions{
		Tail:       "1",
		ShowStdout: true,
	})
	assert.NilError(t, err)
	defer body.Close()

	content, err := io.ReadAll(body)
	assert.NilError(t, err)
	assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))

	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID))

	err = client.ConfigRemove(ctx, configName)
	assert.NilError(t, err)
}

// TestCreateServiceSysctls tests that a service created with sysctl options in
// the ContainerSpec correctly applies those options.
//
// To test this, we're going to create a service with the sysctl option
//
//	{"net.ipv4.ip_nonlocal_bind": "0"}
//
// We'll get the service's tasks to get the container ID, and then we'll
// inspect the container. If the output of the container inspect contains the
// sysctl option with the correct value, we can assume that the sysctl has been
// plumbed correctly.
//
// Next, we'll create a second service with that option set to 1. This means
// that no matter what the default is, we can be confident that the sysctl
// option is applying as intended.
//
// Additionally, we'll do service and task inspects to verify that the inspect
// output includes the desired sysctl option.
//
// We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly
// confident won't be modified by the container runtime, and won't blow
// anything up in the test environment.
func TestCreateServiceSysctls(t *testing.T) {
	ctx := setupTest(t)

	d := swarm.NewSwarm(ctx, t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	// run this block twice, so that no matter what the default value of
	// net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl
	// options works
	for _, expected := range []string{"0", "1"} {
		// store the map we're going to be using everywhere.
		expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected}

		// Create the service with the sysctl options
		var instances uint64 = 1
		serviceID := swarm.CreateService(ctx, t, d,
			swarm.ServiceWithSysctls(expectedSysctls),
		)

		// wait for the service to converge to 1 running task as expected
		poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, instances))

		// we're going to check 3 things:
		//
		//   1. Does the container, when inspected, have the sysctl option set?
		//   2. Does the task have the sysctl in the spec?
		//   3. Does the service have the sysctl in the spec?
		//
		// if all 3 of these things are true, we know that the sysctl has been
		// plumbed correctly through the engine.
		//
		// We don't actually have to get inside the container and check its
		// logs or anything. If we see the sysctl set on the container inspect,
		// we know that the sysctl is plumbed correctly. Everything below that
		// level has been tested elsewhere. (thanks @thaJeztah, because an
		// earlier version of this test had to get container logs and was much
		// more complex)

		// get all tasks of the service, so we can get the container
		tasks, err := client.TaskList(ctx, types.TaskListOptions{
			Filters: filters.NewArgs(filters.Arg("service", serviceID)),
		})
		assert.NilError(t, err)
		assert.Check(t, is.Equal(len(tasks), 1))

		// verify that the container has the sysctl option set
		ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
		assert.NilError(t, err)
		assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls)

		// verify that the task has the sysctl option set in the task object
		assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls)

		// verify that the service also has the sysctl set in the spec.
		service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
		assert.NilError(t, err)
		assert.DeepEqual(t,
			service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls,
		)
	}
}

// TestCreateServiceCapabilities tests that a service created with capabilities options in
// the ContainerSpec correctly applies those options.
//
// To test this, we're going to create a service with the capabilities option
//
//	[]string{"CAP_NET_RAW", "CAP_SYS_CHROOT"}
//
// We'll get the service's tasks to get the container ID, and then we'll
// inspect the container. If the output of the container inspect contains the
// capabilities option with the correct value, we can assume that the
// capabilities have been plumbed correctly.
func TestCreateServiceCapabilities(t *testing.T) {
	ctx := setupTest(t)

	d := swarm.NewSwarm(ctx, t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	// store the slices we're going to be using everywhere.
	capAdd := []string{"CAP_SYS_CHROOT"}
	capDrop := []string{"CAP_NET_RAW"}

	// Create the service with the capabilities options
	var instances uint64 = 1
	serviceID := swarm.CreateService(ctx, t, d,
		swarm.ServiceWithCapabilities(capAdd, capDrop),
	)

	// wait for the service to converge to 1 running task as expected
	poll.WaitOn(t, swarm.RunningTasksCount(ctx, client, serviceID, instances))

	// we're going to check 3 things:
	//
	//   1. Does the container, when inspected, have the capabilities option set?
	//   2. Does the task have the capabilities in the spec?
	//   3. Does the service have the capabilities in the spec?
	//
	// if all 3 of these things are true, we know that the capabilities have
	// been plumbed correctly through the engine.
	//
	// We don't actually have to get inside the container and check its
	// logs or anything. If we see the capabilities set on the container
	// inspect, we know that the capabilities are plumbed correctly.
	// Everything below that level has been tested elsewhere.

	// get all tasks of the service, so we can get the container
	tasks, err := client.TaskList(ctx, types.TaskListOptions{
		Filters: filters.NewArgs(filters.Arg("service", serviceID)),
	})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(len(tasks), 1))

	// verify that the container has the capabilities option set
	ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
	assert.NilError(t, err)
	assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd))
	assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop))

	// verify that the task has the capabilities option set in the task object
	assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd)
	assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop)

	// verify that the service also has the capabilities set in the spec.
	service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)
	assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd)
	assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop)
}