github.1git.de/docker/cli@v26.1.3+incompatible/cli/command/service/update.go (about) 1 package service 2 3 import ( 4 "context" 5 "fmt" 6 "sort" 7 "strings" 8 "time" 9 10 "github.com/docker/cli/cli" 11 "github.com/docker/cli/cli/command" 12 "github.com/docker/cli/opts" 13 "github.com/docker/docker/api/types" 14 "github.com/docker/docker/api/types/container" 15 mounttypes "github.com/docker/docker/api/types/mount" 16 "github.com/docker/docker/api/types/swarm" 17 "github.com/docker/docker/api/types/versions" 18 "github.com/docker/docker/client" 19 units "github.com/docker/go-units" 20 "github.com/moby/swarmkit/v2/api/defaults" 21 "github.com/pkg/errors" 22 "github.com/spf13/cobra" 23 "github.com/spf13/pflag" 24 ) 25 26 func newUpdateCommand(dockerCli command.Cli) *cobra.Command { 27 options := newServiceOptions() 28 29 cmd := &cobra.Command{ 30 Use: "update [OPTIONS] SERVICE", 31 Short: "Update a service", 32 Args: cli.ExactArgs(1), 33 RunE: func(cmd *cobra.Command, args []string) error { 34 return runUpdate(cmd.Context(), dockerCli, cmd.Flags(), options, args[0]) 35 }, 36 ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { 37 return CompletionFn(dockerCli)(cmd, args, toComplete) 38 }, 39 } 40 41 flags := cmd.Flags() 42 flags.String("image", "", "Service image tag") 43 flags.Var(&ShlexOpt{}, "args", "Service command args") 44 flags.Bool(flagRollback, false, "Rollback to previous specification") 45 flags.SetAnnotation(flagRollback, "version", []string{"1.25"}) 46 flags.Bool("force", false, "Force update even if no changes require it") 47 flags.SetAnnotation("force", "version", []string{"1.25"}) 48 addServiceFlags(flags, options, nil) 49 50 flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") 51 flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container") 52 flags.SetAnnotation(flagGroupRemove, "version", []string{"1.25"}) 53 
flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") 54 flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") 55 flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") 56 // flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port") 57 flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port") 58 flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") 59 flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server") 60 flags.SetAnnotation(flagDNSRemove, "version", []string{"1.25"}) 61 flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option") 62 flags.SetAnnotation(flagDNSOptionRemove, "version", []string{"1.25"}) 63 flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain") 64 flags.SetAnnotation(flagDNSSearchRemove, "version", []string{"1.25"}) 65 flags.Var(newListOptsVar(), flagHostRemove, `Remove a custom host-to-IP mapping ("host:ip")`) 66 flags.SetAnnotation(flagHostRemove, "version", []string{"1.25"}) 67 flags.Var(&options.labels, flagLabelAdd, "Add or update a service label") 68 flags.Var(&options.containerLabels, flagContainerLabelAdd, "Add or update a container label") 69 flags.Var(&options.env, flagEnvAdd, "Add or update an environment variable") 70 flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret") 71 flags.SetAnnotation(flagSecretRemove, "version", []string{"1.25"}) 72 flags.Var(&options.secrets, flagSecretAdd, "Add or update a secret on a service") 73 flags.SetAnnotation(flagSecretAdd, "version", []string{"1.25"}) 74 75 flags.Var(newListOptsVar(), flagConfigRemove, "Remove a configuration file") 76 flags.SetAnnotation(flagConfigRemove, "version", []string{"1.30"}) 77 flags.Var(&options.configs, flagConfigAdd, "Add or update a config file on a service") 78 flags.SetAnnotation(flagConfigAdd, 
"version", []string{"1.30"}) 79 80 flags.Var(&options.mounts, flagMountAdd, "Add or update a mount on a service") 81 flags.Var(&options.constraints, flagConstraintAdd, "Add or update a placement constraint") 82 flags.Var(&options.placementPrefs, flagPlacementPrefAdd, "Add a placement preference") 83 flags.SetAnnotation(flagPlacementPrefAdd, "version", []string{"1.28"}) 84 flags.Var(&placementPrefOpts{}, flagPlacementPrefRemove, "Remove a placement preference") 85 flags.SetAnnotation(flagPlacementPrefRemove, "version", []string{"1.28"}) 86 flags.Var(&options.networks, flagNetworkAdd, "Add a network") 87 flags.SetAnnotation(flagNetworkAdd, "version", []string{"1.29"}) 88 flags.Var(newListOptsVar(), flagNetworkRemove, "Remove a network") 89 flags.SetAnnotation(flagNetworkRemove, "version", []string{"1.29"}) 90 flags.Var(&options.endpoint.publishPorts, flagPublishAdd, "Add or update a published port") 91 flags.Var(&options.groups, flagGroupAdd, "Add an additional supplementary user group to the container") 92 flags.SetAnnotation(flagGroupAdd, "version", []string{"1.25"}) 93 flags.Var(&options.dns, flagDNSAdd, "Add or update a custom DNS server") 94 flags.SetAnnotation(flagDNSAdd, "version", []string{"1.25"}) 95 flags.Var(&options.dnsOption, flagDNSOptionAdd, "Add or update a DNS option") 96 flags.SetAnnotation(flagDNSOptionAdd, "version", []string{"1.25"}) 97 flags.Var(&options.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain") 98 flags.SetAnnotation(flagDNSSearchAdd, "version", []string{"1.25"}) 99 flags.Var(&options.hosts, flagHostAdd, `Add a custom host-to-IP mapping ("host:ip")`) 100 flags.SetAnnotation(flagHostAdd, "version", []string{"1.25"}) 101 flags.BoolVar(&options.init, flagInit, false, "Use an init inside each service container to forward signals and reap processes") 102 flags.SetAnnotation(flagInit, "version", []string{"1.37"}) 103 flags.Var(&options.sysctls, flagSysCtlAdd, "Add or update a Sysctl option") 104 
flags.SetAnnotation(flagSysCtlAdd, "version", []string{"1.40"}) 105 flags.Var(newListOptsVar(), flagSysCtlRemove, "Remove a Sysctl option") 106 flags.SetAnnotation(flagSysCtlRemove, "version", []string{"1.40"}) 107 flags.Var(&options.ulimits, flagUlimitAdd, "Add or update a ulimit option") 108 flags.SetAnnotation(flagUlimitAdd, "version", []string{"1.41"}) 109 flags.Var(newListOptsVar(), flagUlimitRemove, "Remove a ulimit option") 110 flags.SetAnnotation(flagUlimitRemove, "version", []string{"1.41"}) 111 112 // Add needs parsing, Remove only needs the key 113 flags.Var(newListOptsVar(), flagGenericResourcesRemove, "Remove a Generic resource") 114 flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) 115 flags.Var(newListOptsVarWithValidator(ValidateSingleGenericResource), flagGenericResourcesAdd, "Add a Generic resource") 116 flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) 117 118 return cmd 119 } 120 121 func newListOptsVar() *opts.ListOpts { 122 return opts.NewListOptsRef(&[]string{}, nil) 123 } 124 125 func newListOptsVarWithValidator(validator opts.ValidatorFctType) *opts.ListOpts { 126 return opts.NewListOptsRef(&[]string{}, validator) 127 } 128 129 //nolint:gocyclo 130 func runUpdate(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, options *serviceOptions, serviceID string) error { 131 apiClient := dockerCli.Client() 132 133 service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) 134 if err != nil { 135 return err 136 } 137 138 rollback, err := flags.GetBool(flagRollback) 139 if err != nil { 140 return err 141 } 142 143 // There are two ways to do user-requested rollback. The old way is 144 // client-side, but with a sufficiently recent daemon we prefer 145 // server-side, because it will honor the rollback parameters. 
146 var ( 147 clientSideRollback bool 148 serverSideRollback bool 149 ) 150 151 spec := &service.Spec 152 if rollback { 153 // Rollback can't be combined with other flags. 154 otherFlagsPassed := false 155 flags.VisitAll(func(f *pflag.Flag) { 156 if f.Name == flagRollback || f.Name == flagDetach || f.Name == flagQuiet { 157 return 158 } 159 if flags.Changed(f.Name) { 160 otherFlagsPassed = true 161 } 162 }) 163 if otherFlagsPassed { 164 return errors.New("other flags may not be combined with --rollback") 165 } 166 167 if versions.LessThan(apiClient.ClientVersion(), "1.28") { 168 clientSideRollback = true 169 spec = service.PreviousSpec 170 if spec == nil { 171 return errors.Errorf("service does not have a previous specification to roll back to") 172 } 173 } else { 174 serverSideRollback = true 175 } 176 } 177 178 updateOpts := types.ServiceUpdateOptions{} 179 if serverSideRollback { 180 updateOpts.Rollback = "previous" 181 } 182 183 err = updateService(ctx, apiClient, flags, spec) 184 if err != nil { 185 return err 186 } 187 188 if flags.Changed("image") { 189 if err := resolveServiceImageDigestContentTrust(dockerCli, spec); err != nil { 190 return err 191 } 192 if !options.noResolveImage && versions.GreaterThanOrEqualTo(apiClient.ClientVersion(), "1.30") { 193 updateOpts.QueryRegistry = true 194 } 195 } 196 197 updatedSecrets, err := getUpdatedSecrets(ctx, apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets) 198 if err != nil { 199 return err 200 } 201 202 spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets 203 204 updatedConfigs, err := getUpdatedConfigs(ctx, apiClient, flags, spec.TaskTemplate.ContainerSpec) 205 if err != nil { 206 return err 207 } 208 209 spec.TaskTemplate.ContainerSpec.Configs = updatedConfigs 210 211 // set the credential spec value after get the updated configs, because we 212 // might need the updated configs to set the correct value of the 213 // CredentialSpec. 
214 updateCredSpecConfig(flags, spec.TaskTemplate.ContainerSpec) 215 216 // only send auth if flag was set 217 sendAuth, err := flags.GetBool(flagRegistryAuth) 218 if err != nil { 219 return err 220 } 221 switch { 222 case sendAuth: 223 // Retrieve encoded auth token from the image reference 224 // This would be the old image if it didn't change in this update 225 image := spec.TaskTemplate.ContainerSpec.Image 226 encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), image) 227 if err != nil { 228 return err 229 } 230 updateOpts.EncodedRegistryAuth = encodedAuth 231 case clientSideRollback: 232 updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec 233 default: 234 updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec 235 } 236 237 response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) 238 if err != nil { 239 return err 240 } 241 242 for _, warning := range response.Warnings { 243 fmt.Fprintln(dockerCli.Err(), warning) 244 } 245 246 fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) 247 248 if options.detach || versions.LessThan(apiClient.ClientVersion(), "1.29") { 249 return nil 250 } 251 252 return WaitOnService(ctx, dockerCli, serviceID, options.quiet) 253 } 254 255 //nolint:gocyclo 256 func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { 257 updateBoolPtr := func(flag string, field **bool) { 258 if flags.Changed(flag) { 259 b, _ := flags.GetBool(flag) 260 *field = &b 261 } 262 } 263 updateString := func(flag string, field *string) { 264 if flags.Changed(flag) { 265 *field, _ = flags.GetString(flag) 266 } 267 } 268 269 updateInt64Value := func(flag string, field *int64) { 270 if flags.Changed(flag) { 271 *field = flags.Lookup(flag).Value.(int64Value).Value() 272 } 273 } 274 275 updateFloatValue := func(flag string, field *float32) { 276 if flags.Changed(flag) { 277 *field = 
flags.Lookup(flag).Value.(*floatValue).Value() 278 } 279 } 280 281 updateDuration := func(flag string, field *time.Duration) { 282 if flags.Changed(flag) { 283 *field, _ = flags.GetDuration(flag) 284 } 285 } 286 287 updateDurationOpt := func(flag string, field **time.Duration) { 288 if flags.Changed(flag) { 289 val := *flags.Lookup(flag).Value.(*opts.DurationOpt).Value() 290 *field = &val 291 } 292 } 293 294 updateInt64 := func(flag string, field *int64) { 295 if flags.Changed(flag) { 296 *field, _ = flags.GetInt64(flag) 297 } 298 } 299 300 updateUint64 := func(flag string, field *uint64) { 301 if flags.Changed(flag) { 302 *field, _ = flags.GetUint64(flag) 303 } 304 } 305 306 updateUint64Opt := func(flag string, field **uint64) { 307 if flags.Changed(flag) { 308 val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() 309 *field = &val 310 } 311 } 312 313 updateIsolation := func(flag string, field *container.Isolation) error { 314 if flags.Changed(flag) { 315 val, _ := flags.GetString(flag) 316 *field = container.Isolation(val) 317 } 318 return nil 319 } 320 321 cspec := spec.TaskTemplate.ContainerSpec 322 task := &spec.TaskTemplate 323 324 taskResources := func() *swarm.ResourceRequirements { 325 if task.Resources == nil { 326 task.Resources = &swarm.ResourceRequirements{} 327 } 328 if task.Resources.Limits == nil { 329 task.Resources.Limits = &swarm.Limit{} 330 } 331 if task.Resources.Reservations == nil { 332 task.Resources.Reservations = &swarm.Resources{} 333 } 334 return task.Resources 335 } 336 337 updateLabels(flags, &spec.Labels) 338 updateContainerLabels(flags, &cspec.Labels) 339 updateString("image", &cspec.Image) 340 updateStringToSlice(flags, "args", &cspec.Args) 341 updateStringToSlice(flags, flagEntrypoint, &cspec.Command) 342 updateEnvironment(flags, &cspec.Env) 343 updateString(flagWorkdir, &cspec.Dir) 344 updateString(flagUser, &cspec.User) 345 updateString(flagHostname, &cspec.Hostname) 346 updateBoolPtr(flagInit, &cspec.Init) 347 if err := 
updateIsolation(flagIsolation, &cspec.Isolation); err != nil { 348 return err 349 } 350 if err := updateMounts(flags, &cspec.Mounts); err != nil { 351 return err 352 } 353 354 updateSysCtls(flags, &task.ContainerSpec.Sysctls) 355 task.ContainerSpec.Ulimits = updateUlimits(flags, task.ContainerSpec.Ulimits) 356 357 if anyChanged(flags, flagLimitCPU, flagLimitMemory, flagLimitPids) { 358 taskResources().Limits = spec.TaskTemplate.Resources.Limits 359 updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) 360 updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) 361 updateInt64(flagLimitPids, &task.Resources.Limits.Pids) 362 } 363 364 if anyChanged(flags, flagReserveCPU, flagReserveMemory) { 365 taskResources().Reservations = spec.TaskTemplate.Resources.Reservations 366 updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) 367 updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) 368 } 369 370 if err := addGenericResources(flags, task); err != nil { 371 return err 372 } 373 374 if err := removeGenericResources(flags, task); err != nil { 375 return err 376 } 377 378 updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) 379 380 if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { 381 if task.RestartPolicy == nil { 382 task.RestartPolicy = defaultRestartPolicy() 383 } 384 if flags.Changed(flagRestartCondition) { 385 value, _ := flags.GetString(flagRestartCondition) 386 task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) 387 } 388 updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) 389 updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) 390 updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) 391 } 392 393 if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { 394 if task.Placement == nil { 395 task.Placement = &swarm.Placement{} 396 } 397 updatePlacementConstraints(flags, 
task.Placement) 398 } 399 400 if anyChanged(flags, flagPlacementPrefAdd, flagPlacementPrefRemove) { 401 if task.Placement == nil { 402 task.Placement = &swarm.Placement{} 403 } 404 updatePlacementPreferences(flags, task.Placement) 405 } 406 407 if anyChanged(flags, flagNetworkAdd, flagNetworkRemove) { 408 if err := updateNetworks(ctx, apiClient, flags, spec); err != nil { 409 return err 410 } 411 } 412 413 if err := updateReplicas(flags, &spec.Mode); err != nil { 414 return err 415 } 416 417 if anyChanged(flags, flagMaxReplicas) { 418 updateUint64(flagMaxReplicas, &task.Placement.MaxReplicas) 419 } 420 421 if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) { 422 if spec.UpdateConfig == nil { 423 spec.UpdateConfig = updateConfigFromDefaults(defaults.Service.Update) 424 } 425 updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) 426 updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) 427 updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor) 428 updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) 429 updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio) 430 updateString(flagUpdateOrder, &spec.UpdateConfig.Order) 431 } 432 433 if anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) { 434 if spec.RollbackConfig == nil { 435 spec.RollbackConfig = updateConfigFromDefaults(defaults.Service.Rollback) 436 } 437 updateUint64(flagRollbackParallelism, &spec.RollbackConfig.Parallelism) 438 updateDuration(flagRollbackDelay, &spec.RollbackConfig.Delay) 439 updateDuration(flagRollbackMonitor, &spec.RollbackConfig.Monitor) 440 updateString(flagRollbackFailureAction, &spec.RollbackConfig.FailureAction) 441 updateFloatValue(flagRollbackMaxFailureRatio, &spec.RollbackConfig.MaxFailureRatio) 442 
updateString(flagRollbackOrder, &spec.RollbackConfig.Order) 443 } 444 445 if flags.Changed(flagEndpointMode) { 446 value, _ := flags.GetString(flagEndpointMode) 447 if spec.EndpointSpec == nil { 448 spec.EndpointSpec = &swarm.EndpointSpec{} 449 } 450 spec.EndpointSpec.Mode = swarm.ResolutionMode(value) 451 } 452 453 if anyChanged(flags, flagGroupAdd, flagGroupRemove) { 454 if err := updateGroups(flags, &cspec.Groups); err != nil { 455 return err 456 } 457 } 458 459 if anyChanged(flags, flagPublishAdd, flagPublishRemove) { 460 if spec.EndpointSpec == nil { 461 spec.EndpointSpec = &swarm.EndpointSpec{} 462 } 463 if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { 464 return err 465 } 466 } 467 468 if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, flagDNSSearchRemove) { 469 if cspec.DNSConfig == nil { 470 cspec.DNSConfig = &swarm.DNSConfig{} 471 } 472 if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil { 473 return err 474 } 475 } 476 477 if anyChanged(flags, flagHostAdd, flagHostRemove) { 478 if err := updateHosts(flags, &cspec.Hosts); err != nil { 479 return err 480 } 481 } 482 483 if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { 484 return err 485 } 486 487 force, err := flags.GetBool("force") 488 if err != nil { 489 return err 490 } 491 492 if force { 493 spec.TaskTemplate.ForceUpdate++ 494 } 495 496 if err := updateHealthcheck(flags, cspec); err != nil { 497 return err 498 } 499 500 if flags.Changed(flagTTY) { 501 tty, err := flags.GetBool(flagTTY) 502 if err != nil { 503 return err 504 } 505 cspec.TTY = tty 506 } 507 508 if flags.Changed(flagReadOnly) { 509 readOnly, err := flags.GetBool(flagReadOnly) 510 if err != nil { 511 return err 512 } 513 cspec.ReadOnly = readOnly 514 } 515 516 updateString(flagStopSignal, &cspec.StopSignal) 517 518 if anyChanged(flags, flagCapAdd, flagCapDrop) { 519 updateCapabilities(flags, cspec) 520 } 521 522 return nil 523 } 524 525 
// updateStringToSlice replaces field with the shell-lexed value of the given
// flag, but only if the flag was set on the command line.
func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) {
	if !flags.Changed(flag) {
		return
	}

	*field = flags.Lookup(flag).Value.(*ShlexOpt).Value()
}

// anyChanged reports whether at least one of the named flags was set on the
// command line.
func anyChanged(flags *pflag.FlagSet, fields ...string) bool {
	for _, flag := range fields {
		if flags.Changed(flag) {
			return true
		}
	}
	return false
}

// addGenericResources merges the generic resources added via
// flagGenericResourcesAdd into the task's reservations, replacing any
// existing entry with the same kind.
func addGenericResources(flags *pflag.FlagSet, spec *swarm.TaskSpec) error {
	if !flags.Changed(flagGenericResourcesAdd) {
		return nil
	}

	if spec.Resources == nil {
		spec.Resources = &swarm.ResourceRequirements{}
	}

	if spec.Resources.Reservations == nil {
		spec.Resources.Reservations = &swarm.Resources{}
	}

	values := flags.Lookup(flagGenericResourcesAdd).Value.(*opts.ListOpts).GetAll()
	generic, err := ParseGenericResources(values)
	if err != nil {
		return err
	}

	m, err := buildGenericResourceMap(spec.Resources.Reservations.GenericResources)
	if err != nil {
		return err
	}

	// Later additions win: same-kind resources overwrite existing entries.
	for _, toAddRes := range generic {
		m[toAddRes.DiscreteResourceSpec.Kind] = toAddRes
	}

	spec.Resources.Reservations.GenericResources = buildGenericResourceList(m)

	return nil
}

// removeGenericResources deletes the generic resources named via
// flagGenericResourcesRemove from the task's reservations. It is an error to
// remove a resource that is not currently reserved.
func removeGenericResources(flags *pflag.FlagSet, spec *swarm.TaskSpec) error {
	// Can only be Discrete Resources
	if !flags.Changed(flagGenericResourcesRemove) {
		return nil
	}

	if spec.Resources == nil {
		spec.Resources = &swarm.ResourceRequirements{}
	}

	if spec.Resources.Reservations == nil {
		spec.Resources.Reservations = &swarm.Resources{}
	}

	values := flags.Lookup(flagGenericResourcesRemove).Value.(*opts.ListOpts).GetAll()

	m, err := buildGenericResourceMap(spec.Resources.Reservations.GenericResources)
	if err != nil {
		return err
	}

	for _, toRemoveRes := range values {
		if _, ok := m[toRemoveRes]; !ok {
			return fmt.Errorf("could not find generic-resource `%s` to remove it", toRemoveRes)
		}

		delete(m, toRemoveRes)
	}

	spec.Resources.Reservations.GenericResources = buildGenericResourceList(m)
	return nil
}

// updatePlacementConstraints applies constraint-add/constraint-rm to the
// placement, then sorts the result for determinism.
func updatePlacementConstraints(flags *pflag.FlagSet, placement *swarm.Placement) {
	if flags.Changed(flagConstraintAdd) {
		values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll()
		placement.Constraints = append(placement.Constraints, values...)
	}
	toRemove := buildToRemoveSet(flags, flagConstraintRemove)

	newConstraints := []string{}
	for _, constraint := range placement.Constraints {
		if _, exists := toRemove[constraint]; !exists {
			newConstraints = append(newConstraints, constraint)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newConstraints)

	placement.Constraints = newConstraints
}

// updatePlacementPreferences applies placement-pref-add/placement-pref-rm to
// the placement. Removal matches on the spread descriptor only.
func updatePlacementPreferences(flags *pflag.FlagSet, placement *swarm.Placement) {
	var newPrefs []swarm.PlacementPreference

	if flags.Changed(flagPlacementPrefRemove) {
		for _, existing := range placement.Preferences {
			removed := false
			for _, removal := range flags.Lookup(flagPlacementPrefRemove).Value.(*placementPrefOpts).prefs {
				if removal.Spread != nil && existing.Spread != nil && removal.Spread.SpreadDescriptor == existing.Spread.SpreadDescriptor {
					removed = true
					break
				}
			}
			if !removed {
				newPrefs = append(newPrefs, existing)
			}
		}
	} else {
		newPrefs = placement.Preferences
	}

	if flags.Changed(flagPlacementPrefAdd) {
		newPrefs = append(newPrefs,
			flags.Lookup(flagPlacementPrefAdd).Value.(*placementPrefOpts).prefs...)
	}

	placement.Preferences = newPrefs
}

// updateContainerLabels applies container-label-rm then container-label-add
// to the container label map, creating the map on first add.
func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) {
	if *field != nil && flags.Changed(flagContainerLabelRemove) {
		toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll()
		for _, label := range toRemove {
			delete(*field, label)
		}
	}
	if flags.Changed(flagContainerLabelAdd) {
		if *field == nil {
			*field = map[string]string{}
		}

		values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll()
		for key, value := range opts.ConvertKVStringsToMap(values) {
			(*field)[key] = value
		}
	}
}

// updateLabels applies label-rm then label-add to the service label map,
// creating the map on first add.
func updateLabels(flags *pflag.FlagSet, field *map[string]string) {
	if *field != nil && flags.Changed(flagLabelRemove) {
		toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll()
		for _, label := range toRemove {
			delete(*field, label)
		}
	}
	if flags.Changed(flagLabelAdd) {
		if *field == nil {
			*field = map[string]string{}
		}

		values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll()
		for key, value := range opts.ConvertKVStringsToMap(values) {
			(*field)[key] = value
		}
	}
}

// updateSysCtls applies sysctl-rm (by key) then sysctl-add (key=value) to the
// sysctl map, creating the map on first add.
func updateSysCtls(flags *pflag.FlagSet, field *map[string]string) {
	if *field != nil && flags.Changed(flagSysCtlRemove) {
		values := flags.Lookup(flagSysCtlRemove).Value.(*opts.ListOpts).GetAll()
		for key := range opts.ConvertKVStringsToMap(values) {
			delete(*field, key)
		}
	}
	if flags.Changed(flagSysCtlAdd) {
		if *field == nil {
			*field = map[string]string{}
		}

		values := flags.Lookup(flagSysCtlAdd).Value.(*opts.ListOpts).GetAll()
		for key, value := range opts.ConvertKVStringsToMap(values) {
			(*field)[key] = value
		}
	}
}

// updateUlimits applies ulimit-rm then ulimit-add to the existing ulimits and
// returns the result sorted by name (nil when empty). Entries are keyed by
// ulimit name, so an add replaces any existing entry of the same name.
func updateUlimits(flags *pflag.FlagSet, ulimits []*units.Ulimit) []*units.Ulimit {
	newUlimits := make(map[string]*units.Ulimit)

	for _, ulimit := range ulimits {
		newUlimits[ulimit.Name] = ulimit
	}
	if flags.Changed(flagUlimitRemove) {
		values := flags.Lookup(flagUlimitRemove).Value.(*opts.ListOpts).GetAll()
		for key := range opts.ConvertKVStringsToMap(values) {
			delete(newUlimits, key)
		}
	}

	if flags.Changed(flagUlimitAdd) {
		for _, ulimit := range flags.Lookup(flagUlimitAdd).Value.(*opts.UlimitOpt).GetList() {
			newUlimits[ulimit.Name] = ulimit
		}
	}
	if len(newUlimits) == 0 {
		return nil
	}
	limits := make([]*units.Ulimit, 0, len(newUlimits))
	for _, ulimit := range newUlimits {
		limits = append(limits, ulimit)
	}
	// Sort so that result is predictable.
	sort.SliceStable(limits, func(i, j int) bool {
		return limits[i].Name < limits[j].Name
	})
	return limits
}

// updateEnvironment applies env-rm (by variable name) then env-add to the
// environment list. NOTE(review): the final list is rebuilt from a map, so
// its ordering is not deterministic.
func updateEnvironment(flags *pflag.FlagSet, field *[]string) {
	toRemove := buildToRemoveSet(flags, flagEnvRemove)
	*field = removeItems(*field, toRemove, envKey)

	if flags.Changed(flagEnvAdd) {
		// Key by variable name so an add replaces an existing definition.
		envSet := map[string]string{}
		for _, v := range *field {
			envSet[envKey(v)] = v
		}

		value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts)
		for _, v := range value.GetAll() {
			envSet[envKey(v)] = v
		}

		*field = []string{}
		for _, v := range envSet {
			*field = append(*field, v)
		}
	}
}

// getUpdatedSecrets returns the secret references that should be on the
// updated service: the existing ones minus secret-rm, plus secret-add
// (resolved to IDs via the API).
func getUpdatedSecrets(ctx context.Context, apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) {
	newSecrets := []*swarm.SecretReference{}

	toRemove := buildToRemoveSet(flags, flagSecretRemove)
	for _, secret := range secrets {
		if _, exists := toRemove[secret.SecretName]; !exists {
			newSecrets = append(newSecrets, secret)
		}
	}

	if flags.Changed(flagSecretAdd) {
		values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value()

		addSecrets, err := ParseSecrets(ctx, apiClient, values)
		if err != nil {
			return nil, err
		}
		newSecrets = append(newSecrets, addSecrets...)
	}

	return newSecrets, nil
}

// getUpdatedConfigs returns the config references for the updated service,
// accounting for config-rm, config-add, and the config (if any) referenced by
// a Windows credential spec, whose ID may need to be resolved via the API.
func getUpdatedConfigs(ctx context.Context, apiClient client.ConfigAPIClient, flags *pflag.FlagSet, spec *swarm.ContainerSpec) ([]*swarm.ConfigReference, error) {
	var (
		// credSpecConfigName stores the name of the config specified by the
		// credential-spec flag. if a Runtime target Config with this name is
		// already in the containerSpec, then this value will be set to
		// emptystring in the removeConfigs stage. otherwise, a ConfigReference
		// will be created to pass to ParseConfigs to get the ConfigID.
		credSpecConfigName string
		// credSpecConfigID stores the ID of the credential spec config if that
		// config is being carried over from the old set of references
		credSpecConfigID string
	)

	if flags.Changed(flagCredentialSpec) {
		credSpec := flags.Lookup(flagCredentialSpec).Value.(*credentialSpecOpt).Value()
		credSpecConfigName = credSpec.Config
	} else { //nolint:gocritic // ignore elseif: can replace 'else {if cond {}}' with 'else if cond {}'
		// if the credential spec flag has not changed, then check if there
		// already is a credentialSpec. if there is one, and it's for a Config,
		// then it's from the old object, and its value is the config ID. we
		// need this so we don't remove the config if the credential spec is
		// not being updated.
		if spec.Privileges != nil && spec.Privileges.CredentialSpec != nil {
			if config := spec.Privileges.CredentialSpec.Config; config != "" {
				credSpecConfigID = config
			}
		}
	}

	newConfigs := removeConfigs(flags, spec, credSpecConfigName, credSpecConfigID)

	// resolveConfigs is a slice of any new configs that need to have the ID
	// resolved
	resolveConfigs := []*swarm.ConfigReference{}

	if flags.Changed(flagConfigAdd) {
		resolveConfigs = append(resolveConfigs, flags.Lookup(flagConfigAdd).Value.(*opts.ConfigOpt).Value()...)
	}

	// if credSpecConfigNameis non-empty at this point, it means its a new
	// config, and we need to resolve its ID accordingly.
	if credSpecConfigName != "" {
		resolveConfigs = append(resolveConfigs, &swarm.ConfigReference{
			ConfigName: credSpecConfigName,
			Runtime:    &swarm.ConfigReferenceRuntimeTarget{},
		})
	}

	if len(resolveConfigs) > 0 {
		addConfigs, err := ParseConfigs(ctx, apiClient, resolveConfigs)
		if err != nil {
			return nil, err
		}
		newConfigs = append(newConfigs, addConfigs...)
	}

	return newConfigs, nil
}

// removeConfigs figures out which configs in the existing spec should be kept
// after the update.
func removeConfigs(flags *pflag.FlagSet, spec *swarm.ContainerSpec, credSpecName, credSpecID string) []*swarm.ConfigReference {
	keepConfigs := []*swarm.ConfigReference{}

	toRemove := buildToRemoveSet(flags, flagConfigRemove)
	// all configs in spec.Configs should have both a Name and ID, because
	// they come from an already-accepted spec.
	for _, config := range spec.Configs {
		// if the config is a Runtime target, make sure it's still in use right
		// now, the only use for Runtime target is credential specs. if, in
		// the future, more uses are added, then this check will need to be
		// made more intelligent.
		if config.Runtime != nil {
			// if we're carrying over a credential spec explicitly (because the
			// user passed --credential-spec with the same config name) then we
			// should match on credSpecName. if we're carrying over a
			// credential spec implicitly (because the user did not pass any
			// --credential-spec flag) then we should match on credSpecID. in
			// either case, we're keeping the config that already exists.
			if config.ConfigName == credSpecName || config.ConfigID == credSpecID {
				keepConfigs = append(keepConfigs, config)
			}
			// continue the loop, to skip the part where we check if the config
			// is in toRemove.
			continue
		}

		if _, exists := toRemove[config.ConfigName]; !exists {
			keepConfigs = append(keepConfigs, config)
		}
	}

	return keepConfigs
}

// envKey returns the variable name of a KEY=VALUE environment entry (the
// whole string when there is no '=').
func envKey(value string) string {
	k, _, _ := strings.Cut(value, "=")
	return k
}

// buildToRemoveSet collects the values of a "-rm" list flag into a set; it
// returns an empty set when the flag was not used.
func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} {
	var empty struct{}
	toRemove := make(map[string]struct{})

	if !flags.Changed(flag) {
		return toRemove
	}

	toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll()
	for _, key := range toRemoveSlice {
		toRemove[key] = empty
	}
	return toRemove
}

// removeItems returns seq minus every item whose keyFunc(item) is in toRemove,
// preserving the original order.
func removeItems(
	seq []string,
	toRemove map[string]struct{},
	keyFunc func(string) string,
) []string {
	newSeq := []string{}
	for _, item := range seq {
		if _, exists := toRemove[keyFunc(item)]; !exists {
			newSeq = append(newSeq, item)
		}
	}
	return newSeq
}

// updateMounts applies mount-add then mount-rm to the mount list, keyed by
// mount target. Adding two mounts with the same target is an error; the
// result is sorted by source, then target, for determinism.
func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error {
	mountsByTarget := map[string]mounttypes.Mount{}

	if flags.Changed(flagMountAdd) {
		values := flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value()
		for _, mount := range values {
			if _, ok := mountsByTarget[mount.Target]; ok {
				return errors.Errorf("duplicate mount target")
			}
			mountsByTarget[mount.Target] = mount
		}
	}

	// Add old list of mount points minus updated one.
	for _, mount := range *mounts {
		if _, ok := mountsByTarget[mount.Target]; !ok {
			mountsByTarget[mount.Target] = mount
		}
	}

	newMounts := []mounttypes.Mount{}

	toRemove := buildToRemoveSet(flags, flagMountRemove)

	for _, mount := range mountsByTarget {
		if _, exists := toRemove[mount.Target]; !exists {
			newMounts = append(newMounts, mount)
		}
	}
	// Sort so that result is predictable.
	sort.Slice(newMounts, func(i, j int) bool {
		a, b := newMounts[i], newMounts[j]

		if a.Source == b.Source {
			return a.Target < b.Target
		}

		return a.Source < b.Source
	})
	*mounts = newMounts
	return nil
}

// updateGroups applies group-add then group-rm to the supplementary group
// list and sorts the result for determinism.
func updateGroups(flags *pflag.FlagSet, groups *[]string) error {
	if flags.Changed(flagGroupAdd) {
		values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll()
		*groups = append(*groups, values...)
	}
	toRemove := buildToRemoveSet(flags, flagGroupRemove)

	newGroups := []string{}
	for _, group := range *groups {
		if _, exists := toRemove[group]; !exists {
			newGroups = append(newGroups, group)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newGroups)

	*groups = newGroups
	return nil
}

// removeDuplicates returns entries with duplicates dropped, keeping the first
// occurrence of each value and preserving order.
func removeDuplicates(entries []string) []string {
	hit := map[string]bool{}
	newEntries := []string{}
	for _, v := range entries {
		if !hit[v] {
			newEntries = append(newEntries, v)
			hit[v] = true
		}
	}
	return newEntries
}

// updateDNSConfig rebuilds the DNS configuration from the existing config
// plus the dns/dns-option/dns-search add and rm flags; each resulting list is
// deduplicated and sorted. (Definition continues past the end of this chunk.)
func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error {
	newConfig := &swarm.DNSConfig{}

	nameservers := (*config).Nameservers
	if flags.Changed(flagDNSAdd) {
		values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll()
		nameservers = append(nameservers, values...)
	}
	nameservers = removeDuplicates(nameservers)
	toRemove := buildToRemoveSet(flags, flagDNSRemove)
	for _, nameserver := range nameservers {
		if _, exists := toRemove[nameserver]; !exists {
			newConfig.Nameservers = append(newConfig.Nameservers, nameserver)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newConfig.Nameservers)

	search := (*config).Search
	if flags.Changed(flagDNSSearchAdd) {
		values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll()
		search = append(search, values...)
	}
	search = removeDuplicates(search)
	toRemove = buildToRemoveSet(flags, flagDNSSearchRemove)
	for _, entry := range search {
		if _, exists := toRemove[entry]; !exists {
			newConfig.Search = append(newConfig.Search, entry)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newConfig.Search)

	options := (*config).Options
	if flags.Changed(flagDNSOptionAdd) {
		values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll()
		options = append(options, values...)
	}
	options = removeDuplicates(options)
	toRemove = buildToRemoveSet(flags, flagDNSOptionRemove)
	for _, option := range options {
		if _, exists := toRemove[option]; !exists {
			newConfig.Options = append(newConfig.Options, option)
		}
	}
	// Sort so that result is predictable.
1037 sort.Strings(newConfig.Options) 1038 1039 *config = newConfig 1040 return nil 1041 } 1042 1043 func portConfigToString(portConfig *swarm.PortConfig) string { 1044 protocol := portConfig.Protocol 1045 mode := portConfig.PublishMode 1046 return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode) 1047 } 1048 1049 func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error { 1050 // The key of the map is `port/protocol`, e.g., `80/tcp` 1051 portSet := map[string]swarm.PortConfig{} 1052 1053 // Build the current list of portConfig 1054 for _, entry := range *portConfig { 1055 entry := entry 1056 if _, ok := portSet[portConfigToString(&entry)]; !ok { 1057 portSet[portConfigToString(&entry)] = entry 1058 } 1059 } 1060 1061 newPorts := []swarm.PortConfig{} 1062 1063 // Clean current ports 1064 toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value() 1065 portLoop: 1066 for _, port := range portSet { 1067 for _, pConfig := range toRemove { 1068 if equalProtocol(port.Protocol, pConfig.Protocol) && 1069 port.TargetPort == pConfig.TargetPort && 1070 equalPublishMode(port.PublishMode, pConfig.PublishMode) { 1071 continue portLoop 1072 } 1073 } 1074 1075 newPorts = append(newPorts, port) 1076 } 1077 1078 // Check to see if there are any conflict in flags. 
1079 if flags.Changed(flagPublishAdd) { 1080 ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value() 1081 1082 for _, port := range ports { 1083 port := port 1084 if _, ok := portSet[portConfigToString(&port)]; ok { 1085 continue 1086 } 1087 // portSet[portConfigToString(&port)] = port 1088 newPorts = append(newPorts, port) 1089 } 1090 } 1091 1092 // Sort the PortConfig to avoid unnecessary updates 1093 sort.Slice(newPorts, func(i, j int) bool { 1094 // We convert PortConfig into `port/protocol`, e.g., `80/tcp` 1095 // In updatePorts we already filter out with map so there is duplicate entries 1096 return portConfigToString(&newPorts[i]) < portConfigToString(&newPorts[j]) 1097 }) 1098 *portConfig = newPorts 1099 return nil 1100 } 1101 1102 func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool { 1103 return prot1 == prot2 || 1104 (prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) || 1105 (prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP) 1106 } 1107 1108 func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool { 1109 return mode1 == mode2 || 1110 (mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) || 1111 (mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress) 1112 } 1113 1114 func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { 1115 if !flags.Changed(flagReplicas) { 1116 return nil 1117 } 1118 1119 if serviceMode == nil || serviceMode.Replicated == nil { 1120 return errors.Errorf("replicas can only be used with replicated mode") 1121 } 1122 serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() 1123 return nil 1124 } 1125 1126 type hostMapping struct { 1127 IPAddr string 1128 Host string 1129 } 1130 1131 // updateHosts performs a diff between existing host entries, entries to be 1132 // removed, and entries to be added. 
Host entries preserve the order in which they 1133 // were added, as the specification mentions that in case multiple entries for a 1134 // host exist, the first entry should be used (by default). 1135 // 1136 // Note that, even though unsupported by the CLI, the service specs format 1137 // allow entries with both a _canonical_ hostname, and one or more aliases 1138 // in an entry (IP-address canonical_hostname [alias ...]) 1139 // 1140 // Entries can be removed by either a specific `<host-name>:<ip-address>` mapping, 1141 // or by `<host>` alone: 1142 // 1143 // - If both IP-address and host-name is provided, the hostname is removed only 1144 // from entries that match the given IP-address. 1145 // - If only a host-name is provided, the hostname is removed from any entry it 1146 // is part of (either as canonical host-name, or as alias). 1147 // - If, after removing the host-name from an entry, no host-names remain in 1148 // the entry, the entry itself is removed. 1149 // 1150 // For example, the list of host-entries before processing could look like this: 1151 // 1152 // hosts = &[]string{ 1153 // "127.0.0.2 host3 host1 host2 host4", 1154 // "127.0.0.1 host1 host4", 1155 // "127.0.0.3 host1", 1156 // "127.0.0.1 host1", 1157 // } 1158 // 1159 // Removing `host1` removes every occurrence: 1160 // 1161 // hosts = &[]string{ 1162 // "127.0.0.2 host3 host2 host4", 1163 // "127.0.0.1 host4", 1164 // } 1165 // 1166 // Removing `host1:127.0.0.1` on the other hand, only remove the host if the 1167 // IP-address matches: 1168 // 1169 // hosts = &[]string{ 1170 // "127.0.0.2 host3 host1 host2 host4", 1171 // "127.0.0.1 host4", 1172 // "127.0.0.3 host1", 1173 // } 1174 func updateHosts(flags *pflag.FlagSet, hosts *[]string) error { 1175 var toRemove []hostMapping 1176 if flags.Changed(flagHostRemove) { 1177 extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() 1178 for _, entry := range extraHostsToRemove { 1179 hostName, ipAddr, _ := 
strings.Cut(entry, ":") 1180 toRemove = append(toRemove, hostMapping{IPAddr: ipAddr, Host: hostName}) 1181 } 1182 } 1183 1184 var newHosts []string 1185 for _, entry := range *hosts { 1186 // Since this is in SwarmKit format, we need to find the key, which is canonical_hostname of: 1187 // IP_address canonical_hostname [aliases...] 1188 parts := strings.Fields(entry) 1189 if len(parts) == 0 { 1190 continue 1191 } 1192 ip := parts[0] 1193 hostNames := parts[1:] 1194 for _, rm := range toRemove { 1195 if rm.IPAddr != "" && rm.IPAddr != ip { 1196 continue 1197 } 1198 for i, h := range hostNames { 1199 if h == rm.Host { 1200 hostNames = append(hostNames[:i], hostNames[i+1:]...) 1201 } 1202 } 1203 } 1204 if len(hostNames) > 0 { 1205 newHosts = append(newHosts, fmt.Sprintf("%s %s", ip, strings.Join(hostNames, " "))) 1206 } 1207 } 1208 1209 // Append new hosts (in SwarmKit format) 1210 if flags.Changed(flagHostAdd) { 1211 values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) 1212 newHosts = append(newHosts, values...) 1213 } 1214 *hosts = removeDuplicates(newHosts) 1215 return nil 1216 } 1217 1218 // updateLogDriver updates the log driver only if the log driver flag is set. 1219 // All options will be replaced with those provided on the command line. 
1220 func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { 1221 if !flags.Changed(flagLogDriver) { 1222 return nil 1223 } 1224 1225 name, err := flags.GetString(flagLogDriver) 1226 if err != nil { 1227 return err 1228 } 1229 1230 if name == "" { 1231 return nil 1232 } 1233 1234 taskTemplate.LogDriver = &swarm.Driver{ 1235 Name: name, 1236 Options: opts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), 1237 } 1238 1239 return nil 1240 } 1241 1242 func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error { 1243 if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) { 1244 return nil 1245 } 1246 if containerSpec.Healthcheck == nil { 1247 containerSpec.Healthcheck = &container.HealthConfig{} 1248 } 1249 noHealthcheck, err := flags.GetBool(flagNoHealthcheck) 1250 if err != nil { 1251 return err 1252 } 1253 if noHealthcheck { 1254 if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) { 1255 containerSpec.Healthcheck = &container.HealthConfig{ 1256 Test: []string{"NONE"}, 1257 } 1258 return nil 1259 } 1260 return errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) 1261 } 1262 if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" { 1263 containerSpec.Healthcheck.Test = nil 1264 } 1265 if flags.Changed(flagHealthInterval) { 1266 val := *flags.Lookup(flagHealthInterval).Value.(*opts.PositiveDurationOpt).Value() 1267 containerSpec.Healthcheck.Interval = val 1268 } 1269 if flags.Changed(flagHealthTimeout) { 1270 val := *flags.Lookup(flagHealthTimeout).Value.(*opts.PositiveDurationOpt).Value() 1271 containerSpec.Healthcheck.Timeout = val 1272 } 1273 if flags.Changed(flagHealthStartPeriod) { 1274 val := *flags.Lookup(flagHealthStartPeriod).Value.(*opts.PositiveDurationOpt).Value() 1275 
containerSpec.Healthcheck.StartPeriod = val 1276 } 1277 if flags.Changed(flagHealthRetries) { 1278 containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries) 1279 } 1280 if flags.Changed(flagHealthCmd) { 1281 cmd, _ := flags.GetString(flagHealthCmd) 1282 if cmd != "" { 1283 containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd} 1284 } else { 1285 containerSpec.Healthcheck.Test = nil 1286 } 1287 } 1288 return nil 1289 } 1290 1291 func updateNetworks(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { 1292 // spec.TaskTemplate.Networks takes precedence over the deprecated 1293 // spec.Networks field. If spec.Network is in use, we'll migrate those 1294 // values to spec.TaskTemplate.Networks. 1295 specNetworks := spec.TaskTemplate.Networks 1296 if len(specNetworks) == 0 { 1297 specNetworks = spec.Networks //nolint:staticcheck // ignore SA1019: field is deprecated. 1298 } 1299 spec.Networks = nil //nolint:staticcheck // ignore SA1019: field is deprecated. 
1300 1301 toRemove := buildToRemoveSet(flags, flagNetworkRemove) 1302 idsToRemove := make(map[string]struct{}) 1303 for networkIDOrName := range toRemove { 1304 network, err := apiClient.NetworkInspect(ctx, networkIDOrName, types.NetworkInspectOptions{Scope: "swarm"}) 1305 if err != nil { 1306 return err 1307 } 1308 idsToRemove[network.ID] = struct{}{} 1309 } 1310 1311 existingNetworks := make(map[string]struct{}) 1312 var newNetworks []swarm.NetworkAttachmentConfig //nolint:prealloc 1313 for _, network := range specNetworks { 1314 if _, exists := idsToRemove[network.Target]; exists { 1315 continue 1316 } 1317 1318 newNetworks = append(newNetworks, network) 1319 existingNetworks[network.Target] = struct{}{} 1320 } 1321 1322 if flags.Changed(flagNetworkAdd) { 1323 values := flags.Lookup(flagNetworkAdd).Value.(*opts.NetworkOpt) 1324 networks := convertNetworks(*values) 1325 for _, network := range networks { 1326 nwID, err := resolveNetworkID(ctx, apiClient, network.Target) 1327 if err != nil { 1328 return err 1329 } 1330 if _, exists := existingNetworks[nwID]; exists { 1331 return errors.Errorf("service is already attached to network %s", network.Target) 1332 } 1333 network.Target = nwID 1334 newNetworks = append(newNetworks, network) 1335 existingNetworks[network.Target] = struct{}{} 1336 } 1337 } 1338 1339 sort.Slice(newNetworks, func(i, j int) bool { 1340 return newNetworks[i].Target < newNetworks[j].Target 1341 }) 1342 1343 spec.TaskTemplate.Networks = newNetworks 1344 return nil 1345 } 1346 1347 // updateCredSpecConfig updates the value of the credential spec Config field 1348 // to the config ID if the credential spec has changed. it mutates the passed 1349 // spec. 
it does not handle the case where the credential spec specifies a 1350 // config that does not exist -- that case is handled as part of 1351 // getUpdatedConfigs 1352 func updateCredSpecConfig(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) { 1353 if flags.Changed(flagCredentialSpec) { 1354 credSpecOpt := flags.Lookup(flagCredentialSpec) 1355 // if the flag has changed, and the value is empty string, then we 1356 // should remove any credential spec that might be present 1357 if credSpecOpt.Value.String() == "" { 1358 if containerSpec.Privileges != nil { 1359 containerSpec.Privileges.CredentialSpec = nil 1360 } 1361 return 1362 } 1363 1364 // otherwise, set the credential spec to be the parsed value 1365 credSpec := credSpecOpt.Value.(*credentialSpecOpt).Value() 1366 1367 // if this is a Config credential spec, we still need to replace the 1368 // value of credSpec.Config with the config ID instead of Name. 1369 if credSpec.Config != "" { 1370 for _, config := range containerSpec.Configs { 1371 // if the config name matches, then set the config ID. we do 1372 // not need to worry about if this is a Runtime target or not. 1373 // even if it is not a Runtime target, getUpdatedConfigs 1374 // ensures that a Runtime target for this config exists, and 1375 // the Name is unique so the ID is correct no matter the 1376 // target. 1377 if config.ConfigName == credSpec.Config { 1378 credSpec.Config = config.ConfigID 1379 break 1380 } 1381 } 1382 } 1383 1384 if containerSpec.Privileges == nil { 1385 containerSpec.Privileges = &swarm.Privileges{} 1386 } 1387 1388 containerSpec.Privileges.CredentialSpec = credSpec 1389 } 1390 } 1391 1392 // updateCapabilities calculates the list of capabilities to "drop" and to "add" 1393 // after applying the capabilities passed through `--cap-add` and `--cap-drop` 1394 // to the existing list of added/dropped capabilities in the service spec. 
1395 // 1396 // Adding capabilities takes precedence over "dropping" the same capability, so 1397 // if both `--cap-add` and `--cap-drop` are specifying the same capability, the 1398 // `--cap-drop` is ignored. 1399 // 1400 // Capabilities to "drop" are removed from the existing list of "added" 1401 // capabilities, and vice-versa (capabilities to "add" are removed from the existing 1402 // list of capabilities to "drop"). 1403 // 1404 // Capabilities are normalized, sorted, and duplicates are removed to prevent 1405 // service tasks from being updated if no changes are made. If a list has the "ALL" 1406 // capability set, then any other capability is removed from that list. 1407 // 1408 // Adding/removing capabilities when updating a service is handled as a tri-state; 1409 // 1410 // - if the capability was previously "dropped", then remove it from "CapabilityDrop", 1411 // but NOT added to "CapabilityAdd". However, if the capability was not yet in 1412 // the service's "CapabilityDrop", then it's simply added to the service's "CapabilityAdd" 1413 // - likewise, if the capability was previously "added", then it's removed from 1414 // "CapabilityAdd", but NOT added to "CapabilityDrop". If the capability was 1415 // not yet in the service's "CapabilityAdd", then simply add it to the service's 1416 // "CapabilityDrop". 
1417 // 1418 // In other words, given a service with the following: 1419 // 1420 // | CapDrop | CapAdd | 1421 // |----------------|---------------| 1422 // | CAP_SOME_CAP | | 1423 // 1424 // When updating the service, and applying `--cap-add CAP_SOME_CAP`, the previously 1425 // dropped capability is removed: 1426 // 1427 // | CapDrop | CapAdd | 1428 // |----------------|---------------| 1429 // | | | 1430 // 1431 // After updating the service a second time, applying `--cap-add CAP_SOME_CAP`, 1432 // capability is now added: 1433 // 1434 // | CapDrop | CapAdd | 1435 // |----------------|---------------| 1436 // | | CAP_SOME_CAP | 1437 func updateCapabilities(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) { 1438 var ( 1439 toAdd, toDrop map[string]bool 1440 1441 capDrop = opts.CapabilitiesMap(containerSpec.CapabilityDrop) 1442 capAdd = opts.CapabilitiesMap(containerSpec.CapabilityAdd) 1443 ) 1444 if flags.Changed(flagCapAdd) { 1445 toAdd = opts.CapabilitiesMap(flags.Lookup(flagCapAdd).Value.(*opts.ListOpts).GetAll()) 1446 if toAdd[opts.ResetCapabilities] { 1447 capAdd = make(map[string]bool) 1448 delete(toAdd, opts.ResetCapabilities) 1449 } 1450 } 1451 if flags.Changed(flagCapDrop) { 1452 toDrop = opts.CapabilitiesMap(flags.Lookup(flagCapDrop).Value.(*opts.ListOpts).GetAll()) 1453 if toDrop[opts.ResetCapabilities] { 1454 capDrop = make(map[string]bool) 1455 delete(toDrop, opts.ResetCapabilities) 1456 } 1457 } 1458 1459 // First remove the capabilities to "drop" from the service's exiting 1460 // list of capabilities to "add". If a capability is both added and dropped 1461 // on update, then "adding" takes precedence. 
1462 // 1463 // Dropping a capability when updating a service is considered a tri-state; 1464 // 1465 // - if the capability was previously "added", then remove it from 1466 // "CapabilityAdd", and do NOT add it to "CapabilityDrop" 1467 // - if the capability was not yet in the service's "CapabilityAdd", 1468 // then simply add it to the service's "CapabilityDrop" 1469 for c := range toDrop { 1470 if !toAdd[c] { 1471 if capAdd[c] { 1472 delete(capAdd, c) 1473 } else { 1474 capDrop[c] = true 1475 } 1476 } 1477 } 1478 1479 // And remove the capabilities we're "adding" from the service's existing 1480 // list of capabilities to "drop". 1481 // 1482 // "Adding" capabilities takes precedence over "dropping" them, so if a 1483 // capability is set both as "add" and "drop", remove the capability from 1484 // the service's list of dropped capabilities (if present). 1485 // 1486 // Adding a capability when updating a service is considered a tri-state; 1487 // 1488 // - if the capability was previously "dropped", then remove it from 1489 // "CapabilityDrop", and do NOT add it to "CapabilityAdd" 1490 // - if the capability was not yet in the service's "CapabilityDrop", 1491 // then simply add it to the service's "CapabilityAdd" 1492 for c := range toAdd { 1493 if capDrop[c] { 1494 delete(capDrop, c) 1495 } else { 1496 capAdd[c] = true 1497 } 1498 } 1499 1500 // Now that the service's existing lists are updated, apply the new 1501 // capabilities to add/drop to both lists. Sort the lists to prevent 1502 // unneeded updates to service-tasks. 
1503 containerSpec.CapabilityDrop = capsList(capDrop) 1504 containerSpec.CapabilityAdd = capsList(capAdd) 1505 } 1506 1507 func capsList(caps map[string]bool) []string { 1508 if len(caps) == 0 { 1509 return nil 1510 } 1511 if caps[opts.AllCapabilities] { 1512 return []string{opts.AllCapabilities} 1513 } 1514 out := make([]string, 0, len(caps)) 1515 for c := range caps { 1516 out = append(out, c) 1517 } 1518 sort.Strings(out) 1519 return out 1520 }