github.com/Heebron/moby@v0.0.0-20221111184709-6eab4f55faf7/daemon/cluster/services.go

package cluster // import "github.com/docker/docker/daemon/cluster"

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/api/types/swarm"
	timetypes "github.com/docker/docker/api/types/time"
	"github.com/docker/docker/daemon/cluster/convert"
	"github.com/docker/docker/errdefs"
	runconfigopts "github.com/docker/docker/runconfig/opts"
	gogotypes "github.com/gogo/protobuf/types"
	swarmapi "github.com/moby/swarmkit/v2/api"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
)

// GetServices returns all services of a managed swarm cluster.
func (c *Cluster) GetServices(options types.ServiceListOptions) ([]swarm.Service, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	// We do the accepted-filter check here because the "mode" filter is
	// processed in the daemon, not in SwarmKit, so it is good to keep the
	// check in the same file as the filter processing (in the for loop
	// below).
	accepted := map[string]bool{
		"name":    true,
		"id":      true,
		"label":   true,
		"mode":    true,
		"runtime": true,
	}
	if err := options.Filters.Validate(accepted); err != nil {
		return nil, err
	}

	if len(options.Filters.Get("runtime")) == 0 {
		// Default to using the container runtime filter
		options.Filters.Add("runtime", string(swarm.RuntimeContainer))
	}

	filters := &swarmapi.ListServicesRequest_Filters{
		NamePrefixes: options.Filters.Get("name"),
		IDPrefixes:   options.Filters.Get("id"),
		Labels:       runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
		Runtimes:     options.Filters.Get("runtime"),
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := state.controlClient.ListServices(
		ctx,
		&swarmapi.ListServicesRequest{Filters: filters},
		grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
	)
	if err != nil {
		return nil, err
	}

	services := make([]swarm.Service, 0, len(r.Services))

	// if the user requests the service statuses, we'll store the IDs needed
	// in this slice
	var serviceIDs []string
	if options.Status {
		serviceIDs = make([]string, 0, len(r.Services))
	}
	for _, service := range r.Services {
		if options.Filters.Contains("mode") {
			var mode string
			switch service.Spec.GetMode().(type) {
			case *swarmapi.ServiceSpec_Global:
				mode = "global"
			case *swarmapi.ServiceSpec_Replicated:
				mode = "replicated"
			case *swarmapi.ServiceSpec_ReplicatedJob:
				mode = "replicated-job"
			case *swarmapi.ServiceSpec_GlobalJob:
				mode = "global-job"
			}

			if !options.Filters.ExactMatch("mode", mode) {
				continue
			}
		}
		if options.Status {
			serviceIDs = append(serviceIDs, service.ID)
		}
		svcs, err := convert.ServiceFromGRPC(*service)
		if err != nil {
			return nil, err
		}
		services = append(services, svcs)
	}

	if options.Status {
		// Listing service statuses is a separate call because, while it is
		// the most common UI operation, it is still just a UI operation, and
		// it would be improper to include this data in swarm's Service
		// object. We pay the cost with some complexity here, but this is
		// still far more efficient than marshalling and unmarshalling all
		// the JSON needed to list tasks and compute this data client-side.
		resp, err := state.controlClient.ListServiceStatuses(
			ctx,
			&swarmapi.ListServiceStatusesRequest{Services: serviceIDs},
			grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
		)
		if err != nil {
			return nil, err
		}

		// we'll need to match up statuses in the response with the services in
		// the list operation. if we did this by operating on two lists, the
		// result would be quadratic. instead, make a mapping of service IDs to
		// service statuses so that this is roughly linear. additionally,
		// convert the status response to an engine api service status here.
		serviceMap := map[string]*swarm.ServiceStatus{}
		for _, status := range resp.Statuses {
			serviceMap[status.ServiceID] = &swarm.ServiceStatus{
				RunningTasks:   status.RunningTasks,
				DesiredTasks:   status.DesiredTasks,
				CompletedTasks: status.CompletedTasks,
			}
		}

		// because this is a list of values and not pointers, make sure we
		// actually alter the value when iterating.
		for i, service := range services {
			// the return value of the ListServiceStatuses operation is
			// guaranteed to contain a value in the response for every
			// argument in the request, so we can safely do this assignment.
			// even if it weren't, and the service ID were for some reason
			// absent from this map, the resulting value of
			// service.ServiceStatus would just be nil -- the same thing it
			// was before
			service.ServiceStatus = serviceMap[service.ID]
			services[i] = service
		}
	}

	return services, nil
}
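
// As a usage sketch (hypothetical caller, not part of this package): listing
// replicated services together with their task counts might look like the
// following, assuming the standard filters helper package.
//
//	opts := types.ServiceListOptions{Status: true, Filters: filters.NewArgs()}
//	opts.Filters.Add("mode", "replicated")
//	svcs, err := c.GetServices(opts)
//	if err != nil {
//		return err
//	}
//	for _, s := range svcs {
//		fmt.Printf("%s: %d/%d tasks\n", s.Spec.Name,
//			s.ServiceStatus.RunningTasks, s.ServiceStatus.DesiredTasks)
//	}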

// GetService returns a service based on an ID or name.
func (c *Cluster) GetService(input string, insertDefaults bool) (swarm.Service, error) {
	var service *swarmapi.Service
	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		s, err := getService(ctx, state.controlClient, input, insertDefaults)
		if err != nil {
			return err
		}
		service = s
		return nil
	}); err != nil {
		return swarm.Service{}, err
	}
	svc, err := convert.ServiceFromGRPC(*service)
	if err != nil {
		return swarm.Service{}, err
	}
	return svc, nil
}

// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s swarm.ServiceSpec, encodedAuth string, queryRegistry bool) (*types.ServiceCreateResponse, error) {
	var resp *types.ServiceCreateResponse
	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		err := c.populateNetworkID(ctx, state.controlClient, &s)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(s)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		resp = &types.ServiceCreateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Attachment:
			return fmt.Errorf("invalid task spec: spec type %q not supported", swarm.RuntimeNetworkAttachment)
		// handle other runtimes here
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(swarm.RuntimePlugin):
				if !c.config.Backend.HasExperimental() {
					return fmt.Errorf("runtime type %q only supported in experimental", swarm.RuntimePlugin)
				}
				if s.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}

			default:
				return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		case *swarmapi.TaskSpec_Container:
			ctnr := serviceSpec.Task.GetContainer()
			if ctnr == nil {
				return errors.New("service does not use container tasks")
			}
			if encodedAuth != "" {
				ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			}

			// retrieve auth config from encoded auth
			authConfig := &registry.AuthConfig{}
			if encodedAuth != "" {
				authReader := strings.NewReader(encodedAuth)
				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
				if err := dec.Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}
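
			// For reference, encodedAuth is expected to be the
			// base64url-encoded JSON form of registry.AuthConfig that
			// clients send in the X-Registry-Auth header, i.e. the encoding
			// of something like (values illustrative):
			//
			//	{"username":"user","password":"secret","serveraddress":"registry.example.com"}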

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future. Integration tests only use the
			// latest API version, so this is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
				if err != nil {
					logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
				} else if ctnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
					ctnr.Image = digestImage
				} else {
					logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause CreateService below to fail. Reusing
				// "ctx" could make it impossible to create a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		}
		return nil
	})

	return resp, err
}
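
// A minimal usage sketch (hypothetical caller; the service name and image are
// illustrative, and error handling is elided):
//
//	spec := swarm.ServiceSpec{
//		Annotations: swarm.Annotations{Name: "web"},
//		TaskTemplate: swarm.TaskSpec{
//			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
//		},
//	}
//	resp, err := c.CreateService(spec, "", true)
//	// resp.ID holds the new service's ID; resp.Warnings may note that the
//	// image could not be pinned by digest.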

// UpdateService updates an existing service to match new properties.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec swarm.ServiceSpec, flags types.ServiceUpdateOptions, queryRegistry bool) (*types.ServiceUpdateResponse, error) {
	var resp *types.ServiceUpdateResponse

	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		err := c.populateNetworkID(ctx, state.controlClient, &spec)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
		if err != nil {
			return err
		}

		resp = &types.ServiceUpdateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Attachment:
			return fmt.Errorf("invalid task spec: spec type %q not supported", swarm.RuntimeNetworkAttachment)
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(swarm.RuntimePlugin):
				if spec.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}
			}
		case *swarmapi.TaskSpec_Container:
			newCtnr := serviceSpec.Task.GetContainer()
			if newCtnr == nil {
				return errors.New("service does not use container tasks")
			}

			encodedAuth := flags.EncodedRegistryAuth
			if encodedAuth != "" {
				newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			} else {
				// if the encodedAuth isn't being updated, we shouldn't lose
				// it; continue to use the one that was already present
				var ctnr *swarmapi.ContainerSpec
				switch flags.RegistryAuthFrom {
				case types.RegistryAuthFromSpec, "":
					ctnr = currentService.Spec.Task.GetContainer()
				case types.RegistryAuthFromPreviousSpec:
					if currentService.PreviousSpec == nil {
						return errors.New("service does not have a previous spec")
					}
					ctnr = currentService.PreviousSpec.Task.GetContainer()
				default:
					return errors.New("unsupported registryAuthFrom value")
				}
				if ctnr == nil {
					return errors.New("service does not use container tasks")
				}
				newCtnr.PullOptions = ctnr.PullOptions
				// update encodedAuth so it can be used to pin image by digest
				if ctnr.PullOptions != nil {
					encodedAuth = ctnr.PullOptions.RegistryAuth
				}
			}

			// retrieve auth config from encoded auth
			authConfig := &registry.AuthConfig{}
			if encodedAuth != "" {
				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future. Integration tests only use the
			// latest API version, so this is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
				if err != nil {
					logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
				} else if newCtnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
					newCtnr.Image = digestImage
				} else {
					logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause UpdateService below to fail. Reusing
				// "ctx" could make it impossible to update a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}
		}

		var rollback swarmapi.UpdateServiceRequest_Rollback
		switch flags.Rollback {
		case "", "none":
			rollback = swarmapi.UpdateServiceRequest_NONE
		case "previous":
			rollback = swarmapi.UpdateServiceRequest_PREVIOUS
		default:
			return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
		}

		_, err = state.controlClient.UpdateService(
			ctx,
			&swarmapi.UpdateServiceRequest{
				ServiceID: currentService.ID,
				Spec:      &serviceSpec,
				ServiceVersion: &swarmapi.Version{
					Index: version,
				},
				Rollback: rollback,
			},
		)
		return err
	})
	return resp, err
}

// RemoveService removes a service from a managed swarm cluster.
func (c *Cluster) RemoveService(input string) error {
	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		service, err := getService(ctx, state.controlClient, input, false)
		if err != nil {
			return err
		}

		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
		return err
	})
}

// ServiceLogs collects service logs and returns them as a channel of log messages.
func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
	if err != nil {
		return nil, errors.Wrap(err, "error making log selector")
	}

	// set the streams we'll use
	stdStreams := []swarmapi.LogStream{}
	if config.ShowStdout {
		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
	}
	if config.ShowStderr {
		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
	}

	// Get tail value squared away - the number of previous log lines we look at
	var tail int64
	// in ContainerLogs, if the tail value is ANYTHING non-integer, we just set
	// it to -1 (all). i don't agree with that, but passing no tail value
	// should be legitimate: if you don't pass tail, we assume you want "all"
	if config.Tail == "all" || config.Tail == "" {
		// tail of 0 means send all logs on the swarmkit side
		tail = 0
	} else {
		t, err := strconv.Atoi(config.Tail)
		if err != nil {
			return nil, errors.New("tail value must be a positive integer or \"all\"")
		}
		if t < 0 {
			return nil, errors.New("negative tail values not supported")
		}
		// we actually use negative tail in swarmkit to represent messages
		// backwards starting from the beginning. also, -1 means no logs. so,
		// basically, for api compat with docker container logs, add one and
		// flip the sign. we error above if you pass a negative tail, which
		// isn't supported by docker (and would error deeper in the stack
		// anyway)
		//
		// See the logs protobuf for more information
		tail = int64(-(t + 1))
	}
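
	// To make the mapping concrete, a worked example of the rule above:
	//
	//	config.Tail == "" or "all" -> tail = 0   (all logs)
	//	config.Tail == "10"        -> tail = -11 (the 10 most recent lines)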

	// get the since value - the time in the past we're looking at logs starting from
	var sinceProto *gogotypes.Timestamp
	if config.Since != "" {
		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse since timestamp")
		}
		since := time.Unix(s, n)
		sinceProto, err = gogotypes.TimestampProto(since)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse timestamp to proto")
		}
	}
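
	// note: config.Since accepts the usual Docker timestamp forms, e.g. an
	// RFC3339-style time or Unix seconds with optional nanoseconds;
	// ParseTimestamps normalizes both into seconds and nanoseconds.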

	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: swarmSelector,
		Options: &swarmapi.LogSubscriptionOptions{
			Follow:  config.Follow,
			Streams: stdStreams,
			Tail:    tail,
			Since:   sinceProto,
		},
	})
	if err != nil {
		return nil, err
	}

	messageChan := make(chan *backend.LogMessage, 1)
	go func() {
		defer close(messageChan)
		for {
			// Check the context before doing anything.
			select {
			case <-ctx.Done():
				return
			default:
			}
			subscribeMsg, err := stream.Recv()
			if err == io.EOF {
				return
			}
			// if the error is anything other than io.EOF, push it into a
			// message and return
			if err != nil {
				select {
				case <-ctx.Done():
				case messageChan <- &backend.LogMessage{Err: err}:
				}
				return
			}

			for _, msg := range subscribeMsg.Messages {
				// make a new message
				m := new(backend.LogMessage)
				m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
				// add the timestamp, adding the error if it fails
				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
				if err != nil {
					m.Err = err
				}

				nodeKey := contextPrefix + ".node.id"
				serviceKey := contextPrefix + ".service.id"
				taskKey := contextPrefix + ".task.id"

				// copy over all of the details
				for _, d := range msg.Attrs {
					switch d.Key {
					case nodeKey, serviceKey, taskKey:
						// we have the final say over context details, in case
						// the user added a detail with a context key for some
						// reason and created a conflict
					default:
						m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
					}
				}
				m.Attrs = append(m.Attrs,
					backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
					backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
					backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
				)

				switch msg.Stream {
				case swarmapi.LogStreamStdout:
					m.Source = "stdout"
				case swarmapi.LogStreamStderr:
					m.Source = "stderr"
				}
				m.Line = msg.Data

				// there could be a case where the reader stops accepting
				// messages and the context is canceled. we need to check that
				// here, otherwise we risk blocking forever on the message
				// send.
				select {
				case <-ctx.Done():
					return
				case messageChan <- m:
				}
			}
		}
	}()
	return messageChan, nil
}
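
// Draining the returned channel might look like this sketch (hypothetical
// caller; "sel" and "opts" are illustrative, error handling is elided):
//
//	msgs, _ := c.ServiceLogs(ctx, sel, opts)
//	for m := range msgs {
//		if m.Err != nil {
//			break
//		}
//		fmt.Printf("[%s] %s", m.Source, m.Line)
//	}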

// convertSelector takes a backend.LogSelector, which contains raw names that
// may or may not be valid, and converts them to an api.LogSelector proto. It
// returns an error if something fails
func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
	// don't rely on swarmkit to resolve IDs, do it ourselves
	swarmSelector := &swarmapi.LogSelector{}
	for _, s := range selector.Services {
		service, err := getService(ctx, cc, s, false)
		if err != nil {
			return nil, err
		}
		c := service.Spec.Task.GetContainer()
		if c == nil {
			return nil, errors.New("logs only supported on container tasks")
		}
		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
	}
	for _, t := range selector.Tasks {
		task, err := getTask(ctx, cc, t)
		if err != nil {
			return nil, err
		}
		c := task.Spec.GetContainer()
		if c == nil {
			return nil, errors.New("logs only supported on container tasks")
		}
		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
	}
	return swarmSelector, nil
}

// imageWithDigestString takes an image such as name or name:tag
// and returns the image pinned to a digest, such as name@sha256:34234
func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *registry.AuthConfig) (string, error) {
	ref, err := reference.ParseAnyReference(image)
	if err != nil {
		return "", err
	}
	namedRef, ok := ref.(reference.Named)
	if !ok {
		if _, ok := ref.(reference.Digested); ok {
			return image, nil
		}
		return "", errors.Errorf("unknown image reference format: %s", image)
	}
	// only query registry if not a canonical reference (i.e. with digest)
	if _, ok := namedRef.(reference.Canonical); !ok {
		namedRef = reference.TagNameOnly(namedRef)

		taggedRef, ok := namedRef.(reference.NamedTagged)
		if !ok {
			return "", errors.Errorf("image reference not tagged: %s", image)
		}

		repo, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig)
		if err != nil {
			return "", err
		}
		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
		if err != nil {
			return "", err
		}

		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
		if err != nil {
			return "", err
		}
		// return familiar form until interface updated to return type
		return reference.FamiliarString(namedDigestedRef), nil
	}
	// reference already contains a digest, so just return it
	return reference.FamiliarString(ref), nil
}
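
// For example (digest values illustrative), the pinning resolves:
//
//	"nginx"      -> "nginx:latest@sha256:ab12..."  (default tag added first)
//	"nginx:1.23" -> "nginx:1.23@sha256:cd34..."
//
// while a reference such as "nginx@sha256:ef56..." is already canonical and
// is returned unchanged without querying the registry.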

// digestWarning constructs a formatted warning string
// using the image name that could not be pinned by digest. The
// formatting is hardcoded, but could be made smarter in the future
func digestWarning(image string) string {
	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
}