// github.1git.de/docker/cli@v26.1.3+incompatible/cli/command/service/formatter.go
package service

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/distribution/reference"
	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/cli/cli/command/inspect"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	mounttypes "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/pkg/stringid"
	units "github.com/docker/go-units"
	"github.com/fvbommel/sortorder"
	"github.com/pkg/errors"
)

const serviceInspectPrettyTemplate formatter.Format = `
ID: {{.ID}}
Name: {{.Name}}
{{- if .Labels }}
Labels:
{{- range $k, $v := .Labels }}
 {{ $k }}{{if $v }}={{ $v }}{{ end }}
{{- end }}{{ end }}
Service Mode:
{{- if .IsModeGlobal }} Global
{{- else if .IsModeReplicated }} Replicated
{{- if .ModeReplicatedReplicas }}
 Replicas: {{ .ModeReplicatedReplicas }}
{{- end }}{{ end }}
{{- if .HasUpdateStatus }}
UpdateStatus:
 State: {{ .UpdateStatusState }}
{{- if .HasUpdateStatusStarted }}
 Started: {{ .UpdateStatusStarted }}
{{- end }}
{{- if .UpdateIsCompleted }}
 Completed: {{ .UpdateStatusCompleted }}
{{- end }}
 Message: {{ .UpdateStatusMessage }}
{{- end }}
Placement:
{{- if .TaskPlacementConstraints }}
 Constraints: {{ .TaskPlacementConstraints }}
{{- end }}
{{- if .TaskPlacementPreferences }}
 Preferences: {{ .TaskPlacementPreferences }}
{{- end }}
{{- if .MaxReplicas }}
 Max Replicas Per Node: {{ .MaxReplicas }}
{{- end }}
{{- if .HasUpdateConfig }}
UpdateConfig:
 Parallelism: {{ .UpdateParallelism }}
{{- if .HasUpdateDelay}}
 Delay: {{ .UpdateDelay }}
{{- end }}
 On failure: {{ .UpdateOnFailure }}
{{- if .HasUpdateMonitor}}
 Monitoring Period: {{ .UpdateMonitor }}
{{- end }}
 Max failure ratio: {{ .UpdateMaxFailureRatio }}
 Update order: {{ .UpdateOrder }}
{{- end }}
{{- if .HasRollbackConfig }}
RollbackConfig:
 Parallelism: {{ .RollbackParallelism }}
{{- if .HasRollbackDelay}}
 Delay: {{ .RollbackDelay }}
{{- end }}
 On failure: {{ .RollbackOnFailure }}
{{- if .HasRollbackMonitor}}
 Monitoring Period: {{ .RollbackMonitor }}
{{- end }}
 Max failure ratio: {{ .RollbackMaxFailureRatio }}
 Rollback order: {{ .RollbackOrder }}
{{- end }}
ContainerSpec:
 Image: {{ .ContainerImage }}
{{- if .ContainerArgs }}
 Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }}
{{- end -}}
{{- if .ContainerEnv }}
 Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }}
{{- end -}}
{{- if .ContainerWorkDir }}
 Dir: {{ .ContainerWorkDir }}
{{- end -}}
{{- if .HasContainerInit }}
 Init: {{ .ContainerInit }}
{{- end -}}
{{- if .ContainerUser }}
 User: {{ .ContainerUser }}
{{- end }}
{{- if .HasCapabilities }}
 Capabilities:
{{- if .HasCapabilityAdd }}
  Add: {{ .CapabilityAdd }}
{{- end }}
{{- if .HasCapabilityDrop }}
  Drop: {{ .CapabilityDrop }}
{{- end }}
{{- end }}
{{- if .ContainerSysCtls }}
 SysCtls:
{{- range $k, $v := .ContainerSysCtls }}
  {{ $k }}{{if $v }}: {{ $v }}{{ end }}
{{- end }}{{ end }}
{{- if .ContainerUlimits }}
 Ulimits:
{{- range $k, $v := .ContainerUlimits }}
  {{ $k }}: {{ $v }}
{{- end }}{{ end }}
{{- if .ContainerMounts }}
Mounts:
{{- end }}
{{- range $mount := .ContainerMounts }}
 Target: {{ $mount.Target }}
 Source: {{ $mount.Source }}
 ReadOnly: {{ $mount.ReadOnly }}
 Type: {{ $mount.Type }}
{{- end -}}
{{- if .Configs}}
Configs:
{{- range $config := .Configs }}
 Target: {{$config.File.Name}}
 Source: {{$config.ConfigName}}
{{- end }}{{ end }}
{{- if .Secrets }}
Secrets:
{{- range $secret := .Secrets }}
 Target: {{$secret.File.Name}}
 Source: {{$secret.SecretName}}
{{- end }}{{ end }}
{{- if .HasLogDriver }}
Log Driver:
{{- if .HasLogDriverName }}
 Name: {{ .LogDriverName }}
{{- end }}
{{- if .LogOpts }}
 LogOpts:
{{- range $k, $v := .LogOpts }}
  {{ $k }}{{if $v }}: {{ $v }}{{ end }}
{{- end }}{{ end }}
{{ end }}
{{- if .HasResources }}
Resources:
{{- if .HasResourceReservations }}
 Reservations:
{{- if gt .ResourceReservationNanoCPUs 0.0 }}
  CPU: {{ .ResourceReservationNanoCPUs }}
{{- end }}
{{- if .ResourceReservationMemory }}
  Memory: {{ .ResourceReservationMemory }}
{{- end }}{{ end }}
{{- if .HasResourceLimits }}
 Limits:
{{- if gt .ResourceLimitsNanoCPUs 0.0 }}
  CPU: {{ .ResourceLimitsNanoCPUs }}
{{- end }}
{{- if .ResourceLimitMemory }}
  Memory: {{ .ResourceLimitMemory }}
{{- end }}{{ end }}{{ end }}
{{- if gt .ResourceLimitPids 0 }}
 PIDs: {{ .ResourceLimitPids }}
{{- end }}
{{- if .Networks }}
Networks:
{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }}
Endpoint Mode: {{ .EndpointMode }}
{{- if .Ports }}
Ports:
{{- range $port := .Ports }}
 PublishedPort = {{ $port.PublishedPort }}
  Protocol = {{ $port.Protocol }}
  TargetPort = {{ $port.TargetPort }}
  PublishMode = {{ $port.PublishMode }}
{{- end }} {{ end -}}
{{- if .Healthcheck }}
 Healthcheck:
  Interval = {{ .Healthcheck.Interval }}
  Retries = {{ .Healthcheck.Retries }}
  StartPeriod = {{ .Healthcheck.StartPeriod }}
  Timeout = {{ .Healthcheck.Timeout }}
{{- if .Healthcheck.Test }}
  Tests:
{{- range $test := .Healthcheck.Test }}
   Test = {{ $test }}
{{- end }} {{ end -}}
{{- end }}
`
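// A minimal sketch of how the pretty template above is typically selected and
// rendered (hypothetical caller; getRef and getNetwork stand for
// inspect.GetRefFunc lookups supplied by the command):
//
//	fmtCtx := formatter.Context{Output: os.Stdout, Format: NewFormat(formatter.PrettyFormatKey)}
//	if err := InspectFormatWrite(fmtCtx, []string{"myservice"}, getRef, getNetwork); err != nil {
//		// handle error
//	}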
// NewFormat returns a Format for rendering using a Context
func NewFormat(source string) formatter.Format {
	switch source {
	case formatter.PrettyFormatKey:
		return serviceInspectPrettyTemplate
	default:
		return formatter.Format(strings.TrimPrefix(source, formatter.RawFormatKey))
	}
}
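// Illustrative inputs and results (assuming formatter.PrettyFormatKey is
// "pretty" and formatter.RawFormatKey is "raw"):
//
//	NewFormat("pretty")      // -> serviceInspectPrettyTemplate
//	NewFormat("raw")         // -> "" (the "raw" prefix is stripped)
//	NewFormat("{{json .}}")  // -> "{{json .}}", used as a Go template verbatim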
func resolveNetworks(service swarm.Service, getNetwork inspect.GetRefFunc) map[string]string {
	networkNames := make(map[string]string)
	for _, network := range service.Spec.TaskTemplate.Networks {
		if resolved, _, err := getNetwork(network.Target); err == nil {
			if resolvedNetwork, ok := resolved.(types.NetworkResource); ok {
				networkNames[resolvedNetwork.ID] = resolvedNetwork.Name
			}
		}
	}
	return networkNames
}

// InspectFormatWrite renders the context for a list of services
func InspectFormatWrite(ctx formatter.Context, refs []string, getRef, getNetwork inspect.GetRefFunc) error {
	if ctx.Format != serviceInspectPrettyTemplate {
		return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef)
	}
	render := func(format func(subContext formatter.SubContext) error) error {
		for _, ref := range refs {
			serviceI, _, err := getRef(ref)
			if err != nil {
				return err
			}
			service, ok := serviceI.(swarm.Service)
			if !ok {
				return errors.Errorf("got wrong object to inspect")
			}
			if err := format(&serviceInspectContext{Service: service, networkNames: resolveNetworks(service, getNetwork)}); err != nil {
				return err
			}
		}
		return nil
	}
	return ctx.Write(&serviceInspectContext{}, render)
}

type serviceInspectContext struct {
	swarm.Service
	formatter.SubContext

	// networkNames is a map from network IDs (as found in
	// Networks[x].Target) to network names.
	networkNames map[string]string
}

func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) {
	return formatter.MarshalJSON(ctx)
}

func (ctx *serviceInspectContext) ID() string {
	return ctx.Service.ID
}

func (ctx *serviceInspectContext) Name() string {
	return ctx.Service.Spec.Name
}

func (ctx *serviceInspectContext) Labels() map[string]string {
	return ctx.Service.Spec.Labels
}

func (ctx *serviceInspectContext) HasLogDriver() bool {
	return ctx.Service.Spec.TaskTemplate.LogDriver != nil
}

func (ctx *serviceInspectContext) HasLogDriverName() bool {
	return ctx.Service.Spec.TaskTemplate.LogDriver.Name != ""
}

func (ctx *serviceInspectContext) LogDriverName() string {
	return ctx.Service.Spec.TaskTemplate.LogDriver.Name
}

func (ctx *serviceInspectContext) LogOpts() map[string]string {
	return ctx.Service.Spec.TaskTemplate.LogDriver.Options
}

func (ctx *serviceInspectContext) Configs() []*swarm.ConfigReference {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Configs
}

func (ctx *serviceInspectContext) Secrets() []*swarm.SecretReference {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Secrets
}

func (ctx *serviceInspectContext) Healthcheck() *container.HealthConfig {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Healthcheck
}

func (ctx *serviceInspectContext) IsModeGlobal() bool {
	return ctx.Service.Spec.Mode.Global != nil
}

func (ctx *serviceInspectContext) IsModeReplicated() bool {
	return ctx.Service.Spec.Mode.Replicated != nil
}

func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 {
	return ctx.Service.Spec.Mode.Replicated.Replicas
}

func (ctx *serviceInspectContext) HasUpdateStatus() bool {
	return ctx.Service.UpdateStatus != nil && ctx.Service.UpdateStatus.State != ""
}

func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState {
	return ctx.Service.UpdateStatus.State
}

func (ctx *serviceInspectContext) HasUpdateStatusStarted() bool {
	return ctx.Service.UpdateStatus.StartedAt != nil
}

func (ctx *serviceInspectContext) UpdateStatusStarted() string {
	return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.StartedAt)) + " ago"
}

func (ctx *serviceInspectContext) UpdateIsCompleted() bool {
	return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted && ctx.Service.UpdateStatus.CompletedAt != nil
}

func (ctx *serviceInspectContext) UpdateStatusCompleted() string {
	return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.CompletedAt)) + " ago"
}

func (ctx *serviceInspectContext) UpdateStatusMessage() string {
	return ctx.Service.UpdateStatus.Message
}
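// units.HumanDuration yields coarse, human-friendly durations, so an update
// started 90 seconds ago renders above as "About a minute ago" (illustrative
// value).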
func (ctx *serviceInspectContext) TaskPlacementConstraints() []string {
	if ctx.Service.Spec.TaskTemplate.Placement != nil {
		return ctx.Service.Spec.TaskTemplate.Placement.Constraints
	}
	return nil
}

func (ctx *serviceInspectContext) TaskPlacementPreferences() []string {
	if ctx.Service.Spec.TaskTemplate.Placement == nil {
		return nil
	}
	var out []string
	for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences {
		if pref.Spread != nil {
			out = append(out, "spread="+pref.Spread.SpreadDescriptor)
		}
	}
	return out
}

func (ctx *serviceInspectContext) MaxReplicas() uint64 {
	if ctx.Service.Spec.TaskTemplate.Placement != nil {
		return ctx.Service.Spec.TaskTemplate.Placement.MaxReplicas
	}
	return 0
}

func (ctx *serviceInspectContext) HasUpdateConfig() bool {
	return ctx.Service.Spec.UpdateConfig != nil
}

func (ctx *serviceInspectContext) UpdateParallelism() uint64 {
	return ctx.Service.Spec.UpdateConfig.Parallelism
}

func (ctx *serviceInspectContext) HasUpdateDelay() bool {
	return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0
}

func (ctx *serviceInspectContext) UpdateDelay() time.Duration {
	return ctx.Service.Spec.UpdateConfig.Delay
}

func (ctx *serviceInspectContext) UpdateOnFailure() string {
	return ctx.Service.Spec.UpdateConfig.FailureAction
}

func (ctx *serviceInspectContext) UpdateOrder() string {
	return ctx.Service.Spec.UpdateConfig.Order
}

func (ctx *serviceInspectContext) HasUpdateMonitor() bool {
	return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0
}

func (ctx *serviceInspectContext) UpdateMonitor() time.Duration {
	return ctx.Service.Spec.UpdateConfig.Monitor
}

func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 {
	return ctx.Service.Spec.UpdateConfig.MaxFailureRatio
}

func (ctx *serviceInspectContext) HasRollbackConfig() bool {
	return ctx.Service.Spec.RollbackConfig != nil
}

func (ctx *serviceInspectContext) RollbackParallelism() uint64 {
	return ctx.Service.Spec.RollbackConfig.Parallelism
}

func (ctx *serviceInspectContext) HasRollbackDelay() bool {
	return ctx.Service.Spec.RollbackConfig.Delay.Nanoseconds() > 0
}

func (ctx *serviceInspectContext) RollbackDelay() time.Duration {
	return ctx.Service.Spec.RollbackConfig.Delay
}

func (ctx *serviceInspectContext) RollbackOnFailure() string {
	return ctx.Service.Spec.RollbackConfig.FailureAction
}

func (ctx *serviceInspectContext) HasRollbackMonitor() bool {
	return ctx.Service.Spec.RollbackConfig.Monitor.Nanoseconds() > 0
}

func (ctx *serviceInspectContext) RollbackMonitor() time.Duration {
	return ctx.Service.Spec.RollbackConfig.Monitor
}

func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 {
	return ctx.Service.Spec.RollbackConfig.MaxFailureRatio
}

func (ctx *serviceInspectContext) RollbackOrder() string {
	return ctx.Service.Spec.RollbackConfig.Order
}

func (ctx *serviceInspectContext) ContainerImage() string {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image
}

func (ctx *serviceInspectContext) ContainerArgs() []string {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args
}

func (ctx *serviceInspectContext) ContainerEnv() []string {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env
}

func (ctx *serviceInspectContext) ContainerWorkDir() string {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir
}

func (ctx *serviceInspectContext) ContainerUser() string {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.User
}

func (ctx *serviceInspectContext) HasContainerInit() bool {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Init != nil
}

func (ctx *serviceInspectContext) ContainerInit() bool {
	return *ctx.Service.Spec.TaskTemplate.ContainerSpec.Init
}

func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts
}

func (ctx *serviceInspectContext) ContainerSysCtls() map[string]string {
	return ctx.Service.Spec.TaskTemplate.ContainerSpec.Sysctls
}

func (ctx *serviceInspectContext) HasContainerSysCtls() bool {
	return len(ctx.Service.Spec.TaskTemplate.ContainerSpec.Sysctls) > 0
}

func (ctx *serviceInspectContext) ContainerUlimits() map[string]string {
	ulimits := map[string]string{}

	for _, u := range ctx.Service.Spec.TaskTemplate.ContainerSpec.Ulimits {
		ulimits[u.Name] = fmt.Sprintf("%d:%d", u.Soft, u.Hard)
	}

	return ulimits
}

func (ctx *serviceInspectContext) HasContainerUlimits() bool {
	return len(ctx.Service.Spec.TaskTemplate.ContainerSpec.Ulimits) > 0
}
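// Each ulimit renders as "soft:hard"; for example (illustrative values), a
// ulimit {Name: "nofile", Soft: 1024, Hard: 2048} appears in the pretty
// output as "nofile: 1024:2048".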
func (ctx *serviceInspectContext) HasResources() bool {
	return ctx.Service.Spec.TaskTemplate.Resources != nil
}

func (ctx *serviceInspectContext) HasResourceReservations() bool {
	if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil {
		return false
	}
	return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0
}

func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 {
	if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 {
		return float64(0)
	}
	return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9
}

func (ctx *serviceInspectContext) ResourceReservationMemory() string {
	if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 {
		return ""
	}
	return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes))
}

func (ctx *serviceInspectContext) HasResourceLimits() bool {
	if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil {
		return false
	}
	return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.Pids > 0
}

func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 {
	return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9
}

func (ctx *serviceInspectContext) ResourceLimitMemory() string {
	if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 {
		return ""
	}
	return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes))
}

func (ctx *serviceInspectContext) ResourceLimitPids() int64 {
	if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil {
		return 0
	}
	return ctx.Service.Spec.TaskTemplate.Resources.Limits.Pids
}
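// NanoCPUs are billionths of a CPU, so the divisions by 1e9 above convert to
// whole CPUs; e.g. a reservation of 500000000 NanoCPUs renders as 0.5
// (illustrative value).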
func (ctx *serviceInspectContext) Networks() []string {
	var out []string
	for _, n := range ctx.Service.Spec.TaskTemplate.Networks {
		if name, ok := ctx.networkNames[n.Target]; ok {
			out = append(out, name)
		} else {
			out = append(out, n.Target)
		}
	}
	return out
}

func (ctx *serviceInspectContext) EndpointMode() string {
	if ctx.Service.Spec.EndpointSpec == nil {
		return ""
	}

	return string(ctx.Service.Spec.EndpointSpec.Mode)
}

func (ctx *serviceInspectContext) Ports() []swarm.PortConfig {
	return ctx.Service.Endpoint.Ports
}

func (ctx *serviceInspectContext) HasCapabilities() bool {
	return len(ctx.Service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd) > 0 || len(ctx.Service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop) > 0
}

func (ctx *serviceInspectContext) HasCapabilityAdd() bool {
	return len(ctx.Service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd) > 0
}

func (ctx *serviceInspectContext) HasCapabilityDrop() bool {
	return len(ctx.Service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop) > 0
}

func (ctx *serviceInspectContext) CapabilityAdd() string {
	return strings.Join(ctx.Service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, ", ")
}

func (ctx *serviceInspectContext) CapabilityDrop() string {
	return strings.Join(ctx.Service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, ", ")
}

const (
	defaultServiceTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Mode}}\t{{.Replicas}}\t{{.Image}}\t{{.Ports}}"

	serviceIDHeader = "ID"
	modeHeader      = "MODE"
	replicasHeader  = "REPLICAS"
)

// NewListFormat returns a Format for rendering using a service Context
func NewListFormat(source string, quiet bool) formatter.Format {
	switch source {
	case formatter.TableFormatKey:
		if quiet {
			return formatter.DefaultQuietFormat
		}
		return defaultServiceTableFormat
	case formatter.RawFormatKey:
		if quiet {
			return `id: {{.ID}}`
		}
		return `id: {{.ID}}\nname: {{.Name}}\nmode: {{.Mode}}\nreplicas: {{.Replicas}}\nimage: {{.Image}}\nports: {{.Ports}}\n`
	}
	return formatter.Format(source)
}

// ListFormatWrite renders the context for a list of services
func ListFormatWrite(ctx formatter.Context, services []swarm.Service) error {
	render := func(format func(subContext formatter.SubContext) error) error {
		sort.Slice(services, func(i, j int) bool {
			return sortorder.NaturalLess(services[i].Spec.Name, services[j].Spec.Name)
		})
		for _, service := range services {
			serviceCtx := &serviceContext{service: service}
			if err := format(serviceCtx); err != nil {
				return err
			}
		}
		return nil
	}
	serviceCtx := serviceContext{}
	serviceCtx.Header = formatter.SubHeaderContext{
		"ID":       serviceIDHeader,
		"Name":     formatter.NameHeader,
		"Mode":     modeHeader,
		"Replicas": replicasHeader,
		"Image":    formatter.ImageHeader,
		"Ports":    formatter.PortsHeader,
	}
	return ctx.Write(&serviceCtx, render)
}

type serviceContext struct {
	formatter.HeaderContext
	service swarm.Service
}

func (c *serviceContext) MarshalJSON() ([]byte, error) {
	return formatter.MarshalJSON(c)
}

func (c *serviceContext) ID() string {
	return stringid.TruncateID(c.service.ID)
}

func (c *serviceContext) Name() string {
	return c.service.Spec.Name
}

func (c *serviceContext) Mode() string {
	switch {
	case c.service.Spec.Mode.Global != nil:
		return "global"
	case c.service.Spec.Mode.Replicated != nil:
		return "replicated"
	case c.service.Spec.Mode.ReplicatedJob != nil:
		return "replicated job"
	case c.service.Spec.Mode.GlobalJob != nil:
		return "global job"
	default:
		return ""
	}
}

func (c *serviceContext) Replicas() string {
	s := &c.service

	var running, desired, completed uint64
	if s.ServiceStatus != nil {
		running = c.service.ServiceStatus.RunningTasks
		desired = c.service.ServiceStatus.DesiredTasks
		completed = c.service.ServiceStatus.CompletedTasks
	}
	// for jobs, we will not include the max per node, even if it is set. jobs
	// instead include the progress of the job as a whole, in addition to the
	// current running state. the system respects max per node, but if we
	// included it in the list output, the lines for jobs would be entirely too
	// long and make the UI look bad.
	if s.Spec.Mode.ReplicatedJob != nil {
		return fmt.Sprintf(
			"%d/%d (%d/%d completed)",
			running, desired, completed, *s.Spec.Mode.ReplicatedJob.TotalCompletions,
		)
	}
	if s.Spec.Mode.GlobalJob != nil {
		// for global jobs, we need to do a little math. desired tasks are only
		// the tasks that have not yet actually reached the Completed state.
		// Completed tasks have reached the completed state. the TOTAL number
		// of tasks to run is the sum of the tasks desired to still complete,
		// and the tasks actually completed.
		return fmt.Sprintf(
			"%d/%d (%d/%d completed)",
			running, desired, completed, desired+completed,
		)
	}
	if r := c.maxReplicas(); r > 0 {
		return fmt.Sprintf("%d/%d (max %d per node)", running, desired, r)
	}
	return fmt.Sprintf("%d/%d", running, desired)
}

func (c *serviceContext) maxReplicas() uint64 {
	if c.Mode() != "replicated" || c.service.Spec.TaskTemplate.Placement == nil {
		return 0
	}
	return c.service.Spec.TaskTemplate.Placement.MaxReplicas
}
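// Worked examples of the REPLICAS column produced above (illustrative values):
//
//	replicated service:          "3/5"
//	with max replicas per node:  "3/5 (max 1 per node)"
//	replicated job:              "2/3 (7/10 completed)"  // TotalCompletions = 10
//	global job:                  "1/2 (4/6 completed)"   // total = desired + completed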
func (c *serviceContext) Image() string {
	var image string
	if c.service.Spec.TaskTemplate.ContainerSpec != nil {
		image = c.service.Spec.TaskTemplate.ContainerSpec.Image
	}
	if ref, err := reference.ParseNormalizedNamed(image); err == nil {
		// update image string for display (strips any digest)
		if nt, ok := ref.(reference.NamedTagged); ok {
			if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil {
				image = reference.FamiliarString(namedTagged)
			}
		}
	}

	return image
}

type portRange struct {
	pStart   uint32
	pEnd     uint32
	tStart   uint32
	tEnd     uint32
	protocol swarm.PortConfigProtocol
}

func (pr portRange) String() string {
	var (
		pub string
		tgt string
	)

	if pr.pEnd > pr.pStart {
		pub = fmt.Sprintf("%d-%d", pr.pStart, pr.pEnd)
	} else {
		pub = strconv.FormatUint(uint64(pr.pStart), 10)
	}
	if pr.tEnd > pr.tStart {
		tgt = fmt.Sprintf("%d-%d", pr.tStart, pr.tEnd)
	} else {
		tgt = strconv.FormatUint(uint64(pr.tStart), 10)
	}
	return fmt.Sprintf("*:%s->%s/%s", pub, tgt, pr.protocol)
}
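// Illustrative outputs of portRange.String:
//
//	portRange{pStart: 80, pEnd: 81, tStart: 80, tEnd: 81, protocol: "tcp"}     // "*:80-81->80-81/tcp"
//	portRange{pStart: 8080, pEnd: 8080, tStart: 80, tEnd: 80, protocol: "udp"} // "*:8080->80/udp"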
// Ports formats published ports on the ingress network for output.
//
// Where possible, ranges are grouped to produce a compact output:
// - multiple ports mapped to a single port (80->80, 81->80) are formatted as *:80-81->80
// - multiple consecutive ports on both sides (80->80, 81->81) are formatted as *:80-81->80-81
//
// The above should not be grouped together, i.e.:
// - 80->80, 81->81, 82->80 should be presented as: *:80-81->80-81, *:82->80
//
// TODO improve:
// - combine non-consecutive ports mapped to a single port (80->80, 81->80, 84->80, 86->80, 87->80), to be printed as *:80-81,84,86-87->80
// - combine tcp and udp mappings if their port-mapping is exactly the same (*:80-81->80-81/tcp+udp instead of *:80-81->80-81/tcp, *:80-81->80-81/udp)
func (c *serviceContext) Ports() string {
	if c.service.Endpoint.Ports == nil {
		return ""
	}

	pr := portRange{}
	ports := []string{}

	servicePorts := c.service.Endpoint.Ports
	sort.Slice(servicePorts, func(i, j int) bool {
		if servicePorts[i].Protocol == servicePorts[j].Protocol {
			return servicePorts[i].PublishedPort < servicePorts[j].PublishedPort
		}
		return servicePorts[i].Protocol < servicePorts[j].Protocol
	})

	for _, p := range c.service.Endpoint.Ports {
		if p.PublishMode == swarm.PortConfigPublishModeIngress {
			prIsRange := pr.tEnd != pr.tStart
			tOverlaps := p.TargetPort <= pr.tEnd

			// Start a new port-range if:
			// - the protocol is different from the current port-range
			// - published or target port are not consecutive to the current port-range
			// - the current port-range is a _range_, and the target port overlaps with the current range's target-ports
			if p.Protocol != pr.protocol || p.PublishedPort-pr.pEnd > 1 || p.TargetPort-pr.tEnd > 1 || prIsRange && tOverlaps {
				// start a new port-range, and print the previous port-range (if any)
				if pr.pStart > 0 {
					ports = append(ports, pr.String())
				}
				pr = portRange{
					pStart:   p.PublishedPort,
					pEnd:     p.PublishedPort,
					tStart:   p.TargetPort,
					tEnd:     p.TargetPort,
					protocol: p.Protocol,
				}
				continue
			}
			pr.pEnd = p.PublishedPort
			pr.tEnd = p.TargetPort
		}
	}
	if pr.pStart > 0 {
		ports = append(ports, pr.String())
	}
	return strings.Join(ports, ", ")
}
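// A hypothetical snippet (not part of the original file) showing the grouping
// behavior of Ports for three ingress mappings:
//
//	svc := swarm.Service{Endpoint: swarm.Endpoint{Ports: []swarm.PortConfig{
//		{Protocol: swarm.PortConfigProtocolTCP, TargetPort: 80, PublishedPort: 80, PublishMode: swarm.PortConfigPublishModeIngress},
//		{Protocol: swarm.PortConfigProtocolTCP, TargetPort: 81, PublishedPort: 81, PublishMode: swarm.PortConfigPublishModeIngress},
//		{Protocol: swarm.PortConfigProtocolTCP, TargetPort: 80, PublishedPort: 82, PublishMode: swarm.PortConfigPublishModeIngress},
//	}}}
//	c := &serviceContext{service: svc}
//	fmt.Println(c.Ports()) // *:80-81->80-81/tcp, *:82->80/tcp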