github.com/demonoid81/moby@v0.0.0-20200517203328-62dd8e17c460/daemon/cluster/services.go (about)

     1  package cluster // import "github.com/demonoid81/moby/daemon/cluster"
     2  
     3  import (
     4  	"context"
     5  	"encoding/base64"
     6  	"encoding/json"
     7  	"fmt"
     8  	"io"
     9  	"os"
    10  	"strconv"
    11  	"strings"
    12  	"time"
    13  
    14  	"github.com/docker/distribution/reference"
    15  	apitypes "github.com/demonoid81/moby/api/types"
    16  	"github.com/demonoid81/moby/api/types/backend"
    17  	types "github.com/demonoid81/moby/api/types/swarm"
    18  	timetypes "github.com/demonoid81/moby/api/types/time"
    19  	"github.com/demonoid81/moby/daemon/cluster/convert"
    20  	"github.com/demonoid81/moby/errdefs"
    21  	runconfigopts "github.com/demonoid81/moby/runconfig/opts"
    22  	swarmapi "github.com/docker/swarmkit/api"
    23  	gogotypes "github.com/gogo/protobuf/types"
    24  	"github.com/pkg/errors"
    25  	"github.com/sirupsen/logrus"
    26  	"google.golang.org/grpc"
    27  )
    28  
// GetServices returns all services of a managed swarm cluster.
// Filters are validated here, then all filters except "mode" are passed
// through to SwarmKit; "mode" is applied locally in the loop below.
func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	// service listing is only valid on an active manager node
	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	// We move the accepted filter check here as the "mode" filter
	// is processed in the daemon, not in SwarmKit. So it might
	// be good to have the accepted-filter check in the same file as
	// the filter processing (in the for loop below).
	accepted := map[string]bool{
		"name":    true,
		"id":      true,
		"label":   true,
		"mode":    true,
		"runtime": true,
	}
	if err := options.Filters.Validate(accepted); err != nil {
		return nil, err
	}

	if len(options.Filters.Get("runtime")) == 0 {
		// Default to using the container runtime filter
		options.Filters.Add("runtime", string(types.RuntimeContainer))
	}

	// everything except "mode" is evaluated server-side by SwarmKit
	filters := &swarmapi.ListServicesRequest_Filters{
		NamePrefixes: options.Filters.Get("name"),
		IDPrefixes:   options.Filters.Get("id"),
		Labels:       runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
		Runtimes:     options.Filters.Get("runtime"),
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := state.controlClient.ListServices(
		ctx,
		&swarmapi.ListServicesRequest{Filters: filters},
		grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
	)
	if err != nil {
		return nil, err
	}

	services := make([]types.Service, 0, len(r.Services))

	// if the user requests the service statuses, we'll store the IDs needed
	// in this slice
	var serviceIDs []string
	if options.Status {
		serviceIDs = make([]string, 0, len(r.Services))
	}
	for _, service := range r.Services {
		// apply the "mode" filter locally; SwarmKit has no notion of it
		if options.Filters.Contains("mode") {
			var mode string
			switch service.Spec.GetMode().(type) {
			case *swarmapi.ServiceSpec_Global:
				mode = "global"
			case *swarmapi.ServiceSpec_Replicated:
				mode = "replicated"
			case *swarmapi.ServiceSpec_ReplicatedJob:
				mode = "replicatedjob"
			case *swarmapi.ServiceSpec_GlobalJob:
				mode = "globaljob"
			}

			if !options.Filters.ExactMatch("mode", mode) {
				continue
			}
		}
		if options.Status {
			serviceIDs = append(serviceIDs, service.ID)
		}
		svcs, err := convert.ServiceFromGRPC(*service)
		if err != nil {
			return nil, err
		}
		services = append(services, svcs)
	}

	if options.Status {
		// Listing service statuses is a separate call because, while it is the
		// most common UI operation, it is still just a UI operation, and it
		// would be improper to include this data in swarm's Service object.
		// We pay the cost with some complexity here, but this is still way
		// more efficient than marshalling and unmarshalling all the JSON
		// needed to list tasks and get this data otherwise client-side
		resp, err := state.controlClient.ListServiceStatuses(
			ctx,
			&swarmapi.ListServiceStatusesRequest{Services: serviceIDs},
			grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
		)
		if err != nil {
			return nil, err
		}

		// we'll need to match up statuses in the response with the services in
		// the list operation. if we did this by operating on two lists, the
		// result would be quadratic. instead, make a mapping of service IDs to
		// service statuses so that this is roughly linear. additionally,
		// convert the status response to an engine api service status here.
		serviceMap := map[string]*types.ServiceStatus{}
		for _, status := range resp.Statuses {
			serviceMap[status.ServiceID] = &types.ServiceStatus{
				RunningTasks:   status.RunningTasks,
				DesiredTasks:   status.DesiredTasks,
				CompletedTasks: status.CompletedTasks,
			}
		}

		// because this is a list of values and not pointers, make sure we
		// actually alter the value when iterating.
		for i, service := range services {
			// the return value of the ListServiceStatuses operation is
			// guaranteed to contain a value in the response for every argument
			// in the request, so we can safely do this assignment. and even if
			// it wasn't, and the service ID was for some reason absent from
			// this map, the resulting value of service.Status would just be
			// nil -- the same thing it was before
			service.ServiceStatus = serviceMap[service.ID]
			services[i] = service
		}
	}

	return services, nil
}
   160  
   161  // GetService returns a service based on an ID or name.
   162  func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) {
   163  	var service *swarmapi.Service
   164  	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   165  		s, err := getService(ctx, state.controlClient, input, insertDefaults)
   166  		if err != nil {
   167  			return err
   168  		}
   169  		service = s
   170  		return nil
   171  	}); err != nil {
   172  		return types.Service{}, err
   173  	}
   174  	svc, err := convert.ServiceFromGRPC(*service)
   175  	if err != nil {
   176  		return types.Service{}, err
   177  	}
   178  	return svc, nil
   179  }
   180  
// CreateService creates a new service in a managed swarm cluster.
// encodedAuth is a base64url-encoded AuthConfig that is both attached to the
// task's pull options and used when contacting the registry; queryRegistry
// controls whether the image is pinned to a digest before creation.
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) {
	var resp *apitypes.ServiceCreateResponse
	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		// resolve network names in the spec to network IDs first
		err := c.populateNetworkID(ctx, state.controlClient, &s)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(s)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		resp = &apitypes.ServiceCreateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Attachment:
			return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment)
		// handle other runtimes here
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(types.RuntimePlugin):
				// plugin services are gated behind the experimental flag
				if !c.config.Backend.HasExperimental() {
					return fmt.Errorf("runtime type %q only supported in experimental", types.RuntimePlugin)
				}
				if s.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}

			default:
				return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		case *swarmapi.TaskSpec_Container:
			ctnr := serviceSpec.Task.GetContainer()
			if ctnr == nil {
				return errors.New("service does not use container tasks")
			}
			if encodedAuth != "" {
				ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			}

			// retrieve auth config from encoded auth; a decode failure is
			// only logged so that service creation proceeds without auth
			authConfig := &apitypes.AuthConfig{}
			if encodedAuth != "" {
				authReader := strings.NewReader(encodedAuth)
				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
				if err := dec.Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future. Since integration tests only use the
			// latest API version, so this is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
				if err != nil {
					// pinning is best-effort: warn and create anyway
					logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))

				} else if ctnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
					ctnr.Image = digestImage

				} else {
					logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)

				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause UpdateService below to fail. Reusing
				// "ctx" could make it impossible to create a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		}
		return nil
	})

	return resp, err
}
   283  
// UpdateService updates existing service to match new properties.
// version must match the service's current version for the update to be
// accepted; flags controls registry-auth reuse and rollback behavior, and
// queryRegistry controls whether the image is re-pinned to a digest.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) {
	var resp *apitypes.ServiceUpdateResponse

	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {

		// resolve network names in the spec to network IDs first
		err := c.populateNetworkID(ctx, state.controlClient, &spec)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		// the existing service is needed both for its ID and to fall back
		// on its (or its previous spec's) registry auth below
		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
		if err != nil {
			return err
		}

		resp = &apitypes.ServiceUpdateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Attachment:
			return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment)
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(types.RuntimePlugin):
				if spec.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}
			}
		case *swarmapi.TaskSpec_Container:
			newCtnr := serviceSpec.Task.GetContainer()
			if newCtnr == nil {
				return errors.New("service does not use container tasks")
			}

			encodedAuth := flags.EncodedRegistryAuth
			if encodedAuth != "" {
				newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			} else {
				// this is needed because if the encodedAuth isn't being updated then we
				// shouldn't lose it, and continue to use the one that was already present
				var ctnr *swarmapi.ContainerSpec
				switch flags.RegistryAuthFrom {
				case apitypes.RegistryAuthFromSpec, "":
					ctnr = currentService.Spec.Task.GetContainer()
				case apitypes.RegistryAuthFromPreviousSpec:
					if currentService.PreviousSpec == nil {
						return errors.New("service does not have a previous spec")
					}
					ctnr = currentService.PreviousSpec.Task.GetContainer()
				default:
					return errors.New("unsupported registryAuthFrom value")
				}
				if ctnr == nil {
					return errors.New("service does not use container tasks")
				}
				newCtnr.PullOptions = ctnr.PullOptions
				// update encodedAuth so it can be used to pin image by digest
				if ctnr.PullOptions != nil {
					encodedAuth = ctnr.PullOptions.RegistryAuth
				}
			}

			// retrieve auth config from encoded auth; a decode failure is
			// only logged so that the update proceeds without auth
			authConfig := &apitypes.AuthConfig{}
			if encodedAuth != "" {
				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future. Since integration tests only use the
			// latest API version, so this is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
				if err != nil {
					// pinning is best-effort: warn and update anyway
					logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
				} else if newCtnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
					newCtnr.Image = digestImage
				} else {
					logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause UpdateService below to fail. Reusing
				// "ctx" could make it impossible to update a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}
		}

		// translate the API rollback flag to the gRPC enum
		var rollback swarmapi.UpdateServiceRequest_Rollback
		switch flags.Rollback {
		case "", "none":
			rollback = swarmapi.UpdateServiceRequest_NONE
		case "previous":
			rollback = swarmapi.UpdateServiceRequest_PREVIOUS
		default:
			return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
		}

		_, err = state.controlClient.UpdateService(
			ctx,
			&swarmapi.UpdateServiceRequest{
				ServiceID: currentService.ID,
				Spec:      &serviceSpec,
				ServiceVersion: &swarmapi.Version{
					Index: version,
				},
				Rollback: rollback,
			},
		)
		return err
	})
	return resp, err
}
   413  
   414  // RemoveService removes a service from a managed swarm cluster.
   415  func (c *Cluster) RemoveService(input string) error {
   416  	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   417  		service, err := getService(ctx, state.controlClient, input, false)
   418  		if err != nil {
   419  			return err
   420  		}
   421  
   422  		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
   423  		return err
   424  	})
   425  }
   426  
// ServiceLogs collects service logs and writes them back to `config.OutStream`
// It subscribes to the swarmkit log broker and returns a channel on which
// converted log messages are delivered; the channel is closed when the
// stream ends or ctx is canceled.
func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	// resolve service/task names in the selector to IDs
	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
	if err != nil {
		return nil, errors.Wrap(err, "error making log selector")
	}

	// set the streams we'll use
	stdStreams := []swarmapi.LogStream{}
	if config.ShowStdout {
		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
	}
	if config.ShowStderr {
		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
	}

	// Get tail value squared away - the number of previous log lines we look at
	var tail int64
	// in ContainerLogs, if the tail value is ANYTHING non-integer, we just set
	// it to -1 (all). i don't agree with that, but i also think no tail value
	// should be legitimate. if you don't pass tail, we assume you want "all"
	if config.Tail == "all" || config.Tail == "" {
		// tail of 0 means send all logs on the swarmkit side
		tail = 0
	} else {
		t, err := strconv.Atoi(config.Tail)
		if err != nil {
			return nil, errors.New("tail value must be a positive integer or \"all\"")
		}
		if t < 0 {
			return nil, errors.New("negative tail values not supported")
		}
		// we actually use negative tail in swarmkit to represent messages
		// backwards starting from the beginning. also, -1 means no logs. so,
		// basically, for api compat with docker container logs, add one and
		// flip the sign. we error above if you try to negative tail, which
		// isn't supported by docker (and would error deeper in the stack
		// anyway)
		//
		// See the logs protobuf for more information
		tail = int64(-(t + 1))
	}

	// get the since value - the time in the past we're looking at logs starting from
	var sinceProto *gogotypes.Timestamp
	if config.Since != "" {
		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse since timestamp")
		}
		since := time.Unix(s, n)
		sinceProto, err = gogotypes.TimestampProto(since)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse timestamp to proto")
		}
	}

	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: swarmSelector,
		Options: &swarmapi.LogSubscriptionOptions{
			Follow:  config.Follow,
			Streams: stdStreams,
			Tail:    tail,
			Since:   sinceProto,
		},
	})
	if err != nil {
		return nil, err
	}

	messageChan := make(chan *backend.LogMessage, 1)
	// pump messages from the gRPC stream into messageChan until EOF,
	// error, or context cancellation
	go func() {
		defer close(messageChan)
		for {
			// Check the context before doing anything.
			select {
			case <-ctx.Done():
				return
			default:
			}
			subscribeMsg, err := stream.Recv()
			if err == io.EOF {
				return
			}
			// if we're not io.EOF, push the message in and return
			if err != nil {
				select {
				case <-ctx.Done():
				case messageChan <- &backend.LogMessage{Err: err}:
				}
				return
			}

			for _, msg := range subscribeMsg.Messages {
				// make a new message
				m := new(backend.LogMessage)
				// +3 for the node/service/task context attrs appended below
				m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
				// add the timestamp, adding the error if it fails
				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
				if err != nil {
					m.Err = err
				}

				nodeKey := contextPrefix + ".node.id"
				serviceKey := contextPrefix + ".service.id"
				taskKey := contextPrefix + ".task.id"

				// copy over all of the details
				for _, d := range msg.Attrs {
					switch d.Key {
					case nodeKey, serviceKey, taskKey:
						// we have the final say over context details (in case there
						// is a conflict (if the user added a detail with a context's
						// key for some reason))
					default:
						m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
					}
				}
				m.Attrs = append(m.Attrs,
					backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
					backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
					backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
				)

				switch msg.Stream {
				case swarmapi.LogStreamStdout:
					m.Source = "stdout"
				case swarmapi.LogStreamStderr:
					m.Source = "stderr"
				}
				m.Line = msg.Data

				// there could be a case where the reader stops accepting
				// messages and the context is canceled. we need to check that
				// here, or otherwise we risk blocking forever on the message
				// send.
				select {
				case <-ctx.Done():
					return
				case messageChan <- m:
				}
			}
		}
	}()
	return messageChan, nil
}
   581  
   582  // convertSelector takes a backend.LogSelector, which contains raw names that
   583  // may or may not be valid, and converts them to an api.LogSelector proto. It
   584  // returns an error if something fails
   585  func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
   586  	// don't rely on swarmkit to resolve IDs, do it ourselves
   587  	swarmSelector := &swarmapi.LogSelector{}
   588  	for _, s := range selector.Services {
   589  		service, err := getService(ctx, cc, s, false)
   590  		if err != nil {
   591  			return nil, err
   592  		}
   593  		c := service.Spec.Task.GetContainer()
   594  		if c == nil {
   595  			return nil, errors.New("logs only supported on container tasks")
   596  		}
   597  		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
   598  	}
   599  	for _, t := range selector.Tasks {
   600  		task, err := getTask(ctx, cc, t)
   601  		if err != nil {
   602  			return nil, err
   603  		}
   604  		c := task.Spec.GetContainer()
   605  		if c == nil {
   606  			return nil, errors.New("logs only supported on container tasks")
   607  		}
   608  		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
   609  	}
   610  	return swarmSelector, nil
   611  }
   612  
   613  // imageWithDigestString takes an image such as name or name:tag
   614  // and returns the image pinned to a digest, such as name@sha256:34234
   615  func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
   616  	ref, err := reference.ParseAnyReference(image)
   617  	if err != nil {
   618  		return "", err
   619  	}
   620  	namedRef, ok := ref.(reference.Named)
   621  	if !ok {
   622  		if _, ok := ref.(reference.Digested); ok {
   623  			return image, nil
   624  		}
   625  		return "", errors.Errorf("unknown image reference format: %s", image)
   626  	}
   627  	// only query registry if not a canonical reference (i.e. with digest)
   628  	if _, ok := namedRef.(reference.Canonical); !ok {
   629  		namedRef = reference.TagNameOnly(namedRef)
   630  
   631  		taggedRef, ok := namedRef.(reference.NamedTagged)
   632  		if !ok {
   633  			return "", errors.Errorf("image reference not tagged: %s", image)
   634  		}
   635  
   636  		repo, _, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig)
   637  		if err != nil {
   638  			return "", err
   639  		}
   640  		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
   641  		if err != nil {
   642  			return "", err
   643  		}
   644  
   645  		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
   646  		if err != nil {
   647  			return "", err
   648  		}
   649  		// return familiar form until interface updated to return type
   650  		return reference.FamiliarString(namedDigestedRef), nil
   651  	}
   652  	// reference already contains a digest, so just return it
   653  	return reference.FamiliarString(ref), nil
   654  }
   655  
   656  // digestWarning constructs a formatted warning string
   657  // using the image name that could not be pinned by digest. The
   658  // formatting is hardcoded, but could me made smarter in the future
   659  func digestWarning(image string) string {
   660  	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
   661  }