github.com/rawahars/moby@v24.0.4+incompatible/integration/service/create_test.go

package service // import "github.com/docker/docker/integration/service"

import (
	"context"
	"io"
	"strings"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/strslice"
	swarmtypes "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/integration/internal/network"
	"github.com/docker/docker/integration/internal/swarm"
	"github.com/docker/docker/testutil/daemon"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/poll"
	"gotest.tools/v3/skip"
)

func TestServiceCreateInit(t *testing.T) {
	defer setupTest(t)()
	t.Run("daemonInitDisabled", testServiceCreateInit(false))
	t.Run("daemonInitEnabled", testServiceCreateInit(true))
}

func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) {
	return func(t *testing.T) {
		var ops = []daemon.Option{}

		if daemonEnabled {
			ops = append(ops, daemon.WithInit())
		}
		d := swarm.NewSwarm(t, testEnv, ops...)
		defer d.Stop(t)
		client := d.NewClientT(t)
		defer client.Close()

		booleanTrue := true
		booleanFalse := false

		serviceID := swarm.CreateService(t, d)
		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
		i := inspectServiceContainer(t, client, serviceID)
		// HostConfig.Init == nil means that it delegates to the daemon configuration
		assert.Check(t, i.HostConfig.Init == nil)

		serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue))
		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
		i = inspectServiceContainer(t, client, serviceID)
		assert.Check(t, is.Equal(true, *i.HostConfig.Init))

		serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse))
		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
		i = inspectServiceContainer(t, client, serviceID)
		assert.Check(t, is.Equal(false, *i.HostConfig.Init))
	}
}

func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON {
	t.Helper()
	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
		Filters: filters.NewArgs(filters.Arg("label", "com.docker.swarm.service.id="+serviceID)),
	})
	assert.NilError(t, err)
	assert.Check(t, is.Len(containers, 1))

	i, err := client.ContainerInspect(context.Background(), containers[0].ID)
	assert.NilError(t, err)
	return i
}
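// For reference, an option like swarm.ServiceWithInit used above boils down to
// a ServiceSpecOpt that stores the tri-state *bool on the container spec,
// where nil defers to the daemon's --init setting. A minimal sketch, assuming
// the ServiceSpecOpt signature from integration/internal/swarm; the real
// helper may differ in detail, and this sketch is not part of the original file.
func serviceWithInitSketch(b *bool) swarm.ServiceSpecOpt {
	return func(spec *swarmtypes.ServiceSpec) {
		if spec.TaskTemplate.ContainerSpec == nil {
			spec.TaskTemplate.ContainerSpec = &swarmtypes.ContainerSpec{}
		}
		spec.TaskTemplate.ContainerSpec.Init = b
	}
}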
func TestCreateServiceMultipleTimes(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()
	ctx := context.Background()

	overlayName := "overlay1_" + t.Name()
	overlayID := network.CreateNoError(ctx, t, client, overlayName,
		network.WithCheckDuplicate(),
		network.WithDriver("overlay"),
	)

	var instances uint64 = 4

	serviceName := "TestService_" + t.Name()
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithNetwork(overlayName),
	}

	serviceID := swarm.CreateService(t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)

	_, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)

	err = client.ServiceRemove(context.Background(), serviceID)
	assert.NilError(t, err)

	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	serviceID2 := swarm.CreateService(t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll)

	err = client.ServiceRemove(context.Background(), serviceID2)
	assert.NilError(t, err)

	// Counter-intuitively, we can't just wait until the service has no
	// tasks. Tasks may briefly exist but not show up if they are in the
	// process of being deallocated. To cover that case, we retry the
	// network remove a few times, to give the tasks time to be deallocated.
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll)

	for retry := 0; retry < 5; retry++ {
		err = client.NetworkRemove(context.Background(), overlayID)
		// TODO(dperny): using strings.Contains for error checking is awful,
		// but so is the fact that swarm functions don't return errdefs errors.
		// I don't have time at this moment to fix the latter, so I guess I'll
		// go with the former.
		//
		// The full error we're looking for is something like this:
		//
		//	Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v
		//
		// The safest way to catch this, I think, is to match on "is in
		// use by", as this is an uninterrupted string that best identifies
		// this error.
		if err == nil || !strings.Contains(err.Error(), "is in use by") {
			// if there is no error, or the error isn't this kind of error,
			// then we break out of the loop, and either fail the test or
			// continue.
			break
		}
	}
	assert.NilError(t, err)

	poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
}

func TestCreateServiceConflict(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	c := d.NewClientT(t)
	defer c.Close()
	ctx := context.Background()

	serviceName := "TestService_" + t.Name()
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithName(serviceName),
	}

	swarm.CreateService(t, d, serviceSpec...)

	spec := swarm.CreateServiceSpec(t, serviceSpec...)
	_, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	assert.Check(t, errdefs.IsConflict(err))
	assert.ErrorContains(t, err, "service "+serviceName+" already exists")
}
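// The retry-until-deallocated dance above could be factored into a small
// helper. A minimal sketch (the name and shape are illustrative, not part of
// the original file): retry an operation while its error message contains a
// given substring, returning the last error observed.
func retryWhileErrContains(attempts int, substr string, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = op()
		if err == nil || !strings.Contains(err.Error(), substr) {
			break
		}
	}
	return err
}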
func TestCreateServiceMaxReplicas(t *testing.T) {
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	var maxReplicas uint64 = 2
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithReplicas(maxReplicas),
		swarm.ServiceWithMaxReplicas(maxReplicas),
	}

	serviceID := swarm.CreateService(t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll)

	_, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)
}
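// swarm.ServiceWithMaxReplicas presumably maps to the MaxReplicas placement
// limit on the task template. A minimal sketch of such an option, assuming the
// ServiceSpecOpt signature from integration/internal/swarm; the real helper
// may differ in detail.
func serviceWithMaxReplicasSketch(n uint64) swarm.ServiceSpecOpt {
	return func(spec *swarmtypes.ServiceSpec) {
		if spec.TaskTemplate.Placement == nil {
			spec.TaskTemplate.Placement = &swarmtypes.Placement{}
		}
		spec.TaskTemplate.Placement.MaxReplicas = n
	}
}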
func TestCreateWithDuplicateNetworkNames(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()
	ctx := context.Background()

	name := "foo_" + t.Name()
	n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge"))
	n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge"))

	// A third network with the same name, but with a different driver
	n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay"))

	// Create a service that references the duplicated network name
	var instances uint64 = 1

	serviceName := "top_" + t.Name()
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithNetwork(name),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)

	resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target))

	// Remove the service, and wait for its tasks to be removed
	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	// Remove networks
	err = client.NetworkRemove(context.Background(), n3)
	assert.NilError(t, err)

	err = client.NetworkRemove(context.Background(), n2)
	assert.NilError(t, err)

	err = client.NetworkRemove(context.Background(), n1)
	assert.NilError(t, err)

	// Make sure the networks have been destroyed.
	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
}

func TestCreateServiceSecretFileMode(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()
	secretName := "TestSecret_" + t.Name()
	secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{
		Annotations: swarmtypes.Annotations{
			Name: secretName,
		},
		Data: []byte("TESTSECRET"),
	})
	assert.NilError(t, err)

	var instances uint64 = 1
	serviceName := "TestService_" + t.Name()
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}),
		swarm.ServiceWithSecret(&swarmtypes.SecretReference{
			File: &swarmtypes.SecretReferenceFileTarget{
				Name: "/etc/secret",
				UID:  "0",
				GID:  "0",
				Mode: 0777,
			},
			SecretID:   secretResp.ID,
			SecretName: secretName,
		}),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)

	body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
		Tail:       "1",
		ShowStdout: true,
	})
	assert.NilError(t, err)
	defer body.Close()

	content, err := io.ReadAll(body)
	assert.NilError(t, err)
	assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))

	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	err = client.SecretRemove(ctx, secretName)
	assert.NilError(t, err)
}
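// The "-rwxrwxrwx" assertion above (and in the config test below) works
// because ls renders the requested 0777 mode in symbolic form; Go's
// os.FileMode prints the same string for a regular file, as a quick
// illustration (not part of the original file):
//
//	os.FileMode(0777).String() // "-rwxrwxrwx"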
func TestCreateServiceConfigFileMode(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()
	configName := "TestConfig_" + t.Name()
	configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{
		Annotations: swarmtypes.Annotations{
			Name: configName,
		},
		Data: []byte("TESTCONFIG"),
	})
	assert.NilError(t, err)

	var instances uint64 = 1
	serviceName := "TestService_" + t.Name()
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}),
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithConfig(&swarmtypes.ConfigReference{
			File: &swarmtypes.ConfigReferenceFileTarget{
				Name: "/etc/config",
				UID:  "0",
				GID:  "0",
				Mode: 0777,
			},
			ConfigID:   configResp.ID,
			ConfigName: configName,
		}),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))

	body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
		Tail:       "1",
		ShowStdout: true,
	})
	assert.NilError(t, err)
	defer body.Close()

	content, err := io.ReadAll(body)
	assert.NilError(t, err)
	assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))

	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID))

	err = client.ConfigRemove(ctx, configName)
	assert.NilError(t, err)
}
// TestCreateServiceSysctls tests that a service created with sysctl options in
// the ContainerSpec correctly applies those options.
//
// To test this, we're going to create a service with the sysctl option
//
//	{"net.ipv4.ip_nonlocal_bind": "0"}
//
// We'll get the service's tasks to get the container ID, and then we'll
// inspect the container. If the output of the container inspect contains the
// sysctl option with the correct value, we can assume that the sysctl has been
// plumbed correctly.
//
// Next, we'll remove that service and create a new service with that option
// set to 1. This means that no matter what the default is, we can be confident
// that the sysctl option is applying as intended.
//
// Additionally, we'll do service and task inspects to verify that the inspect
// output includes the desired sysctl option.
//
// We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly
// confident won't be modified by the container runtime, and won't blow
// anything up in the test environment.
func TestCreateServiceSysctls(t *testing.T) {
	skip.If(
		t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"),
		"setting service sysctls is unsupported before api v1.40",
	)

	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()

	// run this block twice, so that no matter what the default value of
	// net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl
	// option works
	for _, expected := range []string{"0", "1"} {
		// store the map we're going to be using everywhere.
		expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected}

		// Create the service with the sysctl options
		var instances uint64 = 1
		serviceID := swarm.CreateService(t, d,
			swarm.ServiceWithSysctls(expectedSysctls),
		)

		// wait for the service to converge to 1 running task as expected
		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))

		// we're going to check 3 things:
		//
		//	1. Does the container, when inspected, have the sysctl option set?
		//	2. Does the task have the sysctl in the spec?
		//	3. Does the service have the sysctl in the spec?
		//
		// if all 3 of these things are true, we know that the sysctl has been
		// plumbed correctly through the engine.
		//
		// We don't actually have to get inside the container and check its
		// logs or anything. If we see the sysctl set on the container inspect,
		// we know that the sysctl is plumbed correctly. Everything below that
		// level has been tested elsewhere. (thanks @thaJeztah, because an
		// earlier version of this test had to get container logs and was much
		// more complex)

		// get all tasks of the service, so we can get the container
		tasks, err := client.TaskList(ctx, types.TaskListOptions{
			Filters: filters.NewArgs(filters.Arg("service", serviceID)),
		})
		assert.NilError(t, err)
		assert.Check(t, is.Equal(len(tasks), 1))

		// verify that the container has the sysctl option set
		ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
		assert.NilError(t, err)
		assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls)

		// verify that the task has the sysctl option set in the task object
		assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls)

		// verify that the service also has the sysctl set in the spec.
		service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
		assert.NilError(t, err)
		assert.DeepEqual(t,
			service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls,
		)
	}
}
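// Both the sysctl test above and the capability test below fetch the single
// task of a service to reach its container. That lookup could be shared; a
// minimal sketch of such a helper (the name and shape are illustrative, not
// part of the original file):
func serviceTaskContainerID(ctx context.Context, t *testing.T, c client.APIClient, serviceID string) string {
	t.Helper()
	tasks, err := c.TaskList(ctx, types.TaskListOptions{
		Filters: filters.NewArgs(filters.Arg("service", serviceID)),
	})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(len(tasks), 1))
	return tasks[0].Status.ContainerStatus.ContainerID
}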
// TestCreateServiceCapabilities tests that a service created with capability
// options in the ContainerSpec correctly applies those options.
//
// To test this, we're going to create a service with the capability options
//
//	[]string{"CAP_NET_RAW", "CAP_SYS_CHROOT"}
//
// We'll get the service's tasks to get the container ID, and then we'll
// inspect the container. If the output of the container inspect contains the
// capability options with the correct values, we can assume that the
// capabilities have been plumbed correctly.
func TestCreateServiceCapabilities(t *testing.T) {
	skip.If(
		t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"),
		"setting service capabilities is unsupported before api v1.41",
	)

	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()

	// store the slices we're going to be using everywhere.
	capAdd := []string{"CAP_SYS_CHROOT"}
	capDrop := []string{"CAP_NET_RAW"}

	// Create the service with the capability options
	var instances uint64 = 1
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithCapabilities(capAdd, capDrop),
	)

	// wait for the service to converge to 1 running task as expected
	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))

	// we're going to check 3 things:
	//
	//	1. Does the container, when inspected, have the capability options set?
	//	2. Does the task have the capabilities in the spec?
	//	3. Does the service have the capabilities in the spec?
	//
	// if all 3 of these things are true, we know that the capabilities have
	// been plumbed correctly through the engine.
	//
	// We don't actually have to get inside the container and check its
	// logs or anything. If we see the capabilities set on the container
	// inspect, we know that they are plumbed correctly. Everything below that
	// level has been tested elsewhere.

	// get all tasks of the service, so we can get the container
	tasks, err := client.TaskList(ctx, types.TaskListOptions{
		Filters: filters.NewArgs(filters.Arg("service", serviceID)),
	})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(len(tasks), 1))

	// verify that the container has the capability options set
	ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
	assert.NilError(t, err)
	assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd))
	assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop))

	// verify that the task has the capability options set in the task object
	assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd)
	assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop)

	// verify that the service also has the capabilities set in the spec.
	service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)
	assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd)
	assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop)
}
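// Note on the assertions above: HostConfig.CapAdd and HostConfig.CapDrop are
// typed as strslice.StrSlice (a []string alias), so the expected values are
// converted before the DeepEqual comparison, while the task and service specs
// store plain []string and need no conversion.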