github.com/moby/docker@v26.1.3+incompatible/daemon/cluster/services.go

     1  package cluster // import "github.com/docker/docker/daemon/cluster"
     2  
     3  import (
     4  	"context"
     5  	"encoding/base64"
     6  	"encoding/json"
     7  	"fmt"
     8  	"io"
     9  	"os"
    10  	"strconv"
    11  	"strings"
    12  	"time"
    13  
    14  	"github.com/containerd/log"
    15  	"github.com/distribution/reference"
    16  	"github.com/docker/docker/api/types"
    17  	"github.com/docker/docker/api/types/backend"
    18  	"github.com/docker/docker/api/types/container"
    19  	"github.com/docker/docker/api/types/registry"
    20  	"github.com/docker/docker/api/types/swarm"
    21  	timetypes "github.com/docker/docker/api/types/time"
    22  	"github.com/docker/docker/daemon/cluster/convert"
    23  	"github.com/docker/docker/errdefs"
    24  	"github.com/docker/docker/internal/compatcontext"
    25  	runconfigopts "github.com/docker/docker/runconfig/opts"
    26  	gogotypes "github.com/gogo/protobuf/types"
    27  	swarmapi "github.com/moby/swarmkit/v2/api"
    28  	"github.com/opencontainers/go-digest"
    29  	"github.com/pkg/errors"
    30  	"google.golang.org/grpc"
    31  )
    32  
    33  // GetServices returns all services of a managed swarm cluster.
    34  func (c *Cluster) GetServices(options types.ServiceListOptions) ([]swarm.Service, error) {
    35  	c.mu.RLock()
    36  	defer c.mu.RUnlock()
    37  
    38  	state := c.currentNodeState()
    39  	if !state.IsActiveManager() {
    40  		return nil, c.errNoManager(state)
    41  	}
    42  
     43  	// We keep the accepted-filter check here because the "mode"
     44  	// filter is processed in the daemon, not in SwarmKit, so it is
     45  	// best to have the check in the same file as the filter
     46  	// processing (in the for loop below).
    47  	accepted := map[string]bool{
    48  		"name":    true,
    49  		"id":      true,
    50  		"label":   true,
    51  		"mode":    true,
    52  		"runtime": true,
    53  	}
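         	// For illustration, a request such as `docker service ls --filter mode=global`
         	// is accepted by the validation below and applied in the loop over r.Services.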
    54  	if err := options.Filters.Validate(accepted); err != nil {
    55  		return nil, err
    56  	}
    57  
    58  	if len(options.Filters.Get("runtime")) == 0 {
    59  		// Default to using the container runtime filter
    60  		options.Filters.Add("runtime", string(swarm.RuntimeContainer))
    61  	}
    62  
    63  	filters := &swarmapi.ListServicesRequest_Filters{
    64  		NamePrefixes: options.Filters.Get("name"),
    65  		IDPrefixes:   options.Filters.Get("id"),
    66  		Labels:       runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
    67  		Runtimes:     options.Filters.Get("runtime"),
    68  	}
    69  
    70  	ctx := context.TODO()
    71  	ctx, cancel := c.getRequestContext(ctx)
    72  	defer cancel()
    73  
    74  	r, err := state.controlClient.ListServices(
    75  		ctx,
    76  		&swarmapi.ListServicesRequest{Filters: filters},
    77  		grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
    78  	)
    79  	if err != nil {
    80  		return nil, err
    81  	}
    82  
    83  	services := make([]swarm.Service, 0, len(r.Services))
    84  
     85  	// if the user requests service statuses, we'll store the IDs needed
     86  	// in this slice
    87  	var serviceIDs []string
    88  	if options.Status {
    89  		serviceIDs = make([]string, 0, len(r.Services))
    90  	}
    91  	for _, service := range r.Services {
    92  		if options.Filters.Contains("mode") {
    93  			var mode string
    94  			switch service.Spec.GetMode().(type) {
    95  			case *swarmapi.ServiceSpec_Global:
    96  				mode = "global"
    97  			case *swarmapi.ServiceSpec_Replicated:
    98  				mode = "replicated"
    99  			case *swarmapi.ServiceSpec_ReplicatedJob:
   100  				mode = "replicated-job"
   101  			case *swarmapi.ServiceSpec_GlobalJob:
   102  				mode = "global-job"
   103  			}
   104  
   105  			if !options.Filters.ExactMatch("mode", mode) {
   106  				continue
   107  			}
   108  		}
   109  		if options.Status {
   110  			serviceIDs = append(serviceIDs, service.ID)
   111  		}
   112  		svcs, err := convert.ServiceFromGRPC(*service)
   113  		if err != nil {
   114  			return nil, err
   115  		}
   116  		services = append(services, svcs)
   117  	}
   118  
   119  	if options.Status {
   120  		// Listing service statuses is a separate call because, while it is the
   121  		// most common UI operation, it is still just a UI operation, and it
   122  		// would be improper to include this data in swarm's Service object.
   123  		// We pay the cost with some complexity here, but this is still way
   124  		// more efficient than marshalling and unmarshalling all the JSON
   125  		// needed to list tasks and get this data otherwise client-side
   126  		resp, err := state.controlClient.ListServiceStatuses(
   127  			ctx,
   128  			&swarmapi.ListServiceStatusesRequest{Services: serviceIDs},
   129  			grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
   130  		)
   131  		if err != nil {
   132  			return nil, err
   133  		}
   134  
   135  		// we'll need to match up statuses in the response with the services in
   136  		// the list operation. if we did this by operating on two lists, the
   137  		// result would be quadratic. instead, make a mapping of service IDs to
   138  		// service statuses so that this is roughly linear. additionally,
   139  		// convert the status response to an engine api service status here.
   140  		serviceMap := map[string]*swarm.ServiceStatus{}
   141  		for _, status := range resp.Statuses {
   142  			serviceMap[status.ServiceID] = &swarm.ServiceStatus{
   143  				RunningTasks:   status.RunningTasks,
   144  				DesiredTasks:   status.DesiredTasks,
   145  				CompletedTasks: status.CompletedTasks,
   146  			}
   147  		}
   148  
   149  		// because this is a list of values and not pointers, make sure we
   150  		// actually alter the value when iterating.
   151  		for i, service := range services {
   152  			// the return value of the ListServiceStatuses operation is
   153  			// guaranteed to contain a value in the response for every argument
   154  			// in the request, so we can safely do this assignment. and even if
   155  			// it wasn't, and the service ID was for some reason absent from
    156  			// this map, the resulting value of service.ServiceStatus would just be
   157  			// nil -- the same thing it was before
   158  			service.ServiceStatus = serviceMap[service.ID]
   159  			services[i] = service
   160  		}
   161  	}
   162  
   163  	return services, nil
   164  }
   165  
   166  // GetService returns a service based on an ID or name.
   167  func (c *Cluster) GetService(input string, insertDefaults bool) (swarm.Service, error) {
   168  	var service *swarmapi.Service
   169  	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   170  		s, err := getService(ctx, state.controlClient, input, insertDefaults)
   171  		if err != nil {
   172  			return err
   173  		}
   174  		service = s
   175  		return nil
   176  	}); err != nil {
   177  		return swarm.Service{}, err
   178  	}
   179  	svc, err := convert.ServiceFromGRPC(*service)
   180  	if err != nil {
   181  		return swarm.Service{}, err
   182  	}
   183  	return svc, nil
   184  }
   185  
   186  // CreateService creates a new service in a managed swarm cluster.
   187  func (c *Cluster) CreateService(s swarm.ServiceSpec, encodedAuth string, queryRegistry bool) (*swarm.ServiceCreateResponse, error) {
   188  	var resp *swarm.ServiceCreateResponse
   189  	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   190  		err := c.populateNetworkID(ctx, state.controlClient, &s)
   191  		if err != nil {
   192  			return err
   193  		}
   194  
   195  		serviceSpec, err := convert.ServiceSpecToGRPC(s)
   196  		if err != nil {
   197  			return errdefs.InvalidParameter(err)
   198  		}
   199  
   200  		resp = &swarm.ServiceCreateResponse{}
   201  
   202  		switch serviceSpec.Task.Runtime.(type) {
   203  		case *swarmapi.TaskSpec_Attachment:
   204  			return fmt.Errorf("invalid task spec: spec type %q not supported", swarm.RuntimeNetworkAttachment)
   205  		// handle other runtimes here
   206  		case *swarmapi.TaskSpec_Generic:
   207  			switch serviceSpec.Task.GetGeneric().Kind {
   208  			case string(swarm.RuntimePlugin):
   209  				if !c.config.Backend.HasExperimental() {
   210  					return fmt.Errorf("runtime type %q only supported in experimental", swarm.RuntimePlugin)
   211  				}
   212  				if s.TaskTemplate.PluginSpec == nil {
   213  					return errors.New("plugin spec must be set")
   214  				}
   215  
   216  			default:
   217  				return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
   218  			}
   219  
   220  			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
   221  			if err != nil {
   222  				return err
   223  			}
   224  
   225  			resp.ID = r.Service.ID
   226  		case *swarmapi.TaskSpec_Container:
   227  			ctnr := serviceSpec.Task.GetContainer()
   228  			if ctnr == nil {
   229  				return errors.New("service does not use container tasks")
   230  			}
   231  			if encodedAuth != "" {
   232  				ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
   233  			}
   234  
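         			// Note: encodedAuth is expected to be a base64url-encoded JSON
         			// registry.AuthConfig; an illustrative (not real) payload would be
         			//   {"username":"jane","password":"secret","serveraddress":"registry.example.com:5000"}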
   235  			// retrieve auth config from encoded auth
   236  			authConfig := &registry.AuthConfig{}
   237  			if encodedAuth != "" {
   238  				authReader := strings.NewReader(encodedAuth)
   239  				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
   240  				if err := dec.Decode(authConfig); err != nil {
   241  					log.G(ctx).Warnf("invalid authconfig: %v", err)
   242  				}
   243  			}
   244  
   245  			// pin image by digest for API versions < 1.30
   246  			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
    247  			// should be removed in the future. Integration tests only use the
    248  			// latest API version, so this is no longer required.
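         			// Setting DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1 skips the registry
         			// lookup below and uses the image reference exactly as supplied.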
   249  			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
   250  				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
   251  				if err != nil {
   252  					log.G(ctx).Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
   253  					// warning in the client response should be concise
   254  					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
   255  				} else if ctnr.Image != digestImage {
   256  					log.G(ctx).Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
   257  					ctnr.Image = digestImage
   258  				} else {
   259  					log.G(ctx).Debugf("creating service using supplied digest reference %s", ctnr.Image)
   260  				}
   261  
   262  				// Replace the context with a fresh one.
   263  				// If we timed out while communicating with the
   264  				// registry, then "ctx" will already be expired, which
    265  				// would cause CreateService below to fail. Reusing
   266  				// "ctx" could make it impossible to create a service
   267  				// if the registry is slow or unresponsive.
   268  				var cancel func()
   269  				ctx = compatcontext.WithoutCancel(ctx)
   270  				ctx, cancel = c.getRequestContext(ctx)
   271  				defer cancel()
   272  			}
   273  
   274  			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
   275  			if err != nil {
   276  				return err
   277  			}
   278  
   279  			resp.ID = r.Service.ID
   280  		}
   281  		return nil
   282  	})
   283  
   284  	return resp, err
   285  }
   286  
   287  // UpdateService updates existing service to match new properties.
   288  func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec swarm.ServiceSpec, flags types.ServiceUpdateOptions, queryRegistry bool) (*swarm.ServiceUpdateResponse, error) {
   289  	var resp *swarm.ServiceUpdateResponse
   290  
   291  	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   292  		err := c.populateNetworkID(ctx, state.controlClient, &spec)
   293  		if err != nil {
   294  			return err
   295  		}
   296  
   297  		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
   298  		if err != nil {
   299  			return errdefs.InvalidParameter(err)
   300  		}
   301  
   302  		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
   303  		if err != nil {
   304  			return err
   305  		}
   306  
   307  		resp = &swarm.ServiceUpdateResponse{}
   308  
   309  		switch serviceSpec.Task.Runtime.(type) {
   310  		case *swarmapi.TaskSpec_Attachment:
   311  			return fmt.Errorf("invalid task spec: spec type %q not supported", swarm.RuntimeNetworkAttachment)
   312  		case *swarmapi.TaskSpec_Generic:
   313  			switch serviceSpec.Task.GetGeneric().Kind {
   314  			case string(swarm.RuntimePlugin):
   315  				if spec.TaskTemplate.PluginSpec == nil {
   316  					return errors.New("plugin spec must be set")
   317  				}
   318  			}
   319  		case *swarmapi.TaskSpec_Container:
   320  			newCtnr := serviceSpec.Task.GetContainer()
   321  			if newCtnr == nil {
   322  				return errors.New("service does not use container tasks")
   323  			}
   324  
   325  			encodedAuth := flags.EncodedRegistryAuth
   326  			if encodedAuth != "" {
   327  				newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
   328  			} else {
    329  				// if the encodedAuth isn't being updated, we shouldn't lose it;
    330  				// continue to use the one that was already present
   331  				var ctnr *swarmapi.ContainerSpec
   332  				switch flags.RegistryAuthFrom {
   333  				case types.RegistryAuthFromSpec, "":
   334  					ctnr = currentService.Spec.Task.GetContainer()
   335  				case types.RegistryAuthFromPreviousSpec:
   336  					if currentService.PreviousSpec == nil {
   337  						return errors.New("service does not have a previous spec")
   338  					}
   339  					ctnr = currentService.PreviousSpec.Task.GetContainer()
   340  				default:
   341  					return errors.New("unsupported registryAuthFrom value")
   342  				}
   343  				if ctnr == nil {
   344  					return errors.New("service does not use container tasks")
   345  				}
   346  				newCtnr.PullOptions = ctnr.PullOptions
   347  				// update encodedAuth so it can be used to pin image by digest
   348  				if ctnr.PullOptions != nil {
   349  					encodedAuth = ctnr.PullOptions.RegistryAuth
   350  				}
   351  			}
   352  
   353  			// retrieve auth config from encoded auth
   354  			authConfig := &registry.AuthConfig{}
   355  			if encodedAuth != "" {
   356  				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
   357  					log.G(ctx).Warnf("invalid authconfig: %v", err)
   358  				}
   359  			}
   360  
   361  			// pin image by digest for API versions < 1.30
   362  			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
    363  			// should be removed in the future. Integration tests only use the
    364  			// latest API version, so this is no longer required.
   365  			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
   366  				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
   367  				if err != nil {
   368  					log.G(ctx).Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
   369  					// warning in the client response should be concise
   370  					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
   371  				} else if newCtnr.Image != digestImage {
   372  					log.G(ctx).Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
   373  					newCtnr.Image = digestImage
   374  				} else {
   375  					log.G(ctx).Debugf("updating service using supplied digest reference %s", newCtnr.Image)
   376  				}
   377  
   378  				// Replace the context with a fresh one.
   379  				// If we timed out while communicating with the
   380  				// registry, then "ctx" will already be expired, which
   381  				// would cause UpdateService below to fail. Reusing
   382  				// "ctx" could make it impossible to update a service
   383  				// if the registry is slow or unresponsive.
   384  				var cancel func()
   385  				ctx = compatcontext.WithoutCancel(ctx)
   386  				ctx, cancel = c.getRequestContext(ctx)
   387  				defer cancel()
   388  			}
   389  		}
   390  
   391  		var rollback swarmapi.UpdateServiceRequest_Rollback
   392  		switch flags.Rollback {
   393  		case "", "none":
   394  			rollback = swarmapi.UpdateServiceRequest_NONE
   395  		case "previous":
   396  			rollback = swarmapi.UpdateServiceRequest_PREVIOUS
   397  		default:
   398  			return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
   399  		}
   400  
   401  		_, err = state.controlClient.UpdateService(
   402  			ctx,
   403  			&swarmapi.UpdateServiceRequest{
   404  				ServiceID: currentService.ID,
   405  				Spec:      &serviceSpec,
   406  				ServiceVersion: &swarmapi.Version{
   407  					Index: version,
   408  				},
   409  				Rollback: rollback,
   410  			},
   411  		)
   412  		return err
   413  	})
   414  	return resp, err
   415  }
   416  
   417  // RemoveService removes a service from a managed swarm cluster.
   418  func (c *Cluster) RemoveService(input string) error {
   419  	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   420  		service, err := getService(ctx, state.controlClient, input, false)
   421  		if err != nil {
   422  			return err
   423  		}
   424  
   425  		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
   426  		return err
   427  	})
   428  }
   429  
    430  // ServiceLogs collects service logs and returns them as a channel of log messages.
   431  func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *container.LogsOptions) (<-chan *backend.LogMessage, error) {
   432  	c.mu.RLock()
   433  	defer c.mu.RUnlock()
   434  
   435  	state := c.currentNodeState()
   436  	if !state.IsActiveManager() {
   437  		return nil, c.errNoManager(state)
   438  	}
   439  
   440  	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
   441  	if err != nil {
   442  		return nil, errors.Wrap(err, "error making log selector")
   443  	}
   444  
   445  	// set the streams we'll use
   446  	stdStreams := []swarmapi.LogStream{}
   447  	if config.ShowStdout {
   448  		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
   449  	}
   450  	if config.ShowStderr {
   451  		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
   452  	}
   453  
   454  	// Get tail value squared away - the number of previous log lines we look at
   455  	var tail int64
    456  	// In ContainerLogs, any non-integer tail value is silently treated as -1
    457  	// (all). We don't do that here, but an empty tail value is still
    458  	// legitimate: if you don't pass tail, we assume you want "all".
   459  	if config.Tail == "all" || config.Tail == "" {
   460  		// tail of 0 means send all logs on the swarmkit side
   461  		tail = 0
   462  	} else {
   463  		t, err := strconv.Atoi(config.Tail)
   464  		if err != nil {
   465  			return nil, errors.New(`tail value must be a positive integer or "all"`)
   466  		}
   467  		if t < 0 {
   468  			return nil, errors.New("negative tail values not supported")
   469  		}
   470  		// we actually use negative tail in swarmkit to represent messages
   471  		// backwards starting from the beginning. also, -1 means no logs. so,
   472  		// basically, for api compat with docker container logs, add one and
    473  		// flip the sign. we error above if you pass a negative tail, which
   474  		// isn't supported by docker (and would error deeper in the stack
   475  		// anyway)
   476  		//
   477  		// See the logs protobuf for more information
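         		// For example, a requested tail of 100 is sent to swarmkit as -101.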
   478  		tail = int64(-(t + 1))
   479  	}
   480  
   481  	// get the since value - the time in the past we're looking at logs starting from
   482  	var sinceProto *gogotypes.Timestamp
   483  	if config.Since != "" {
   484  		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
   485  		if err != nil {
   486  			return nil, errors.Wrap(err, "could not parse since timestamp")
   487  		}
   488  		since := time.Unix(s, n)
   489  		sinceProto, err = gogotypes.TimestampProto(since)
   490  		if err != nil {
   491  			return nil, errors.Wrap(err, "could not parse timestamp to proto")
   492  		}
   493  	}
   494  
   495  	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
   496  		Selector: swarmSelector,
   497  		Options: &swarmapi.LogSubscriptionOptions{
   498  			Follow:  config.Follow,
   499  			Streams: stdStreams,
   500  			Tail:    tail,
   501  			Since:   sinceProto,
   502  		},
   503  	})
   504  	if err != nil {
   505  		return nil, err
   506  	}
   507  
   508  	messageChan := make(chan *backend.LogMessage, 1)
   509  	go func() {
   510  		defer close(messageChan)
   511  		for {
   512  			// Check the context before doing anything.
   513  			select {
   514  			case <-ctx.Done():
   515  				return
   516  			default:
   517  			}
   518  			subscribeMsg, err := stream.Recv()
   519  			if err == io.EOF {
   520  				return
   521  			}
    522  			// on any other error, push an error message and return
   523  			if err != nil {
   524  				select {
   525  				case <-ctx.Done():
   526  				case messageChan <- &backend.LogMessage{Err: err}:
   527  				}
   528  				return
   529  			}
   530  
   531  			for _, msg := range subscribeMsg.Messages {
   532  				// make a new message
   533  				m := new(backend.LogMessage)
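         				// reserve room for the three context attributes (node, service,
         				// task) appended below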
   534  				m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
   535  				// add the timestamp, adding the error if it fails
   536  				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
   537  				if err != nil {
   538  					m.Err = err
   539  				}
   540  
   541  				nodeKey := contextPrefix + ".node.id"
   542  				serviceKey := contextPrefix + ".service.id"
   543  				taskKey := contextPrefix + ".task.id"
   544  
   545  				// copy over all of the details
   546  				for _, d := range msg.Attrs {
   547  					switch d.Key {
   548  					case nodeKey, serviceKey, taskKey:
    549  						// we have the final say over context details, in case the user
    550  						// added a detail with one of the context keys and created a
    551  						// conflict
   552  					default:
   553  						m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
   554  					}
   555  				}
   556  				m.Attrs = append(m.Attrs,
   557  					backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
   558  					backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
   559  					backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
   560  				)
   561  
   562  				switch msg.Stream {
   563  				case swarmapi.LogStreamStdout:
   564  					m.Source = "stdout"
   565  				case swarmapi.LogStreamStderr:
   566  					m.Source = "stderr"
   567  				}
   568  				m.Line = msg.Data
   569  
   570  				// there could be a case where the reader stops accepting
   571  				// messages and the context is canceled. we need to check that
   572  				// here, or otherwise we risk blocking forever on the message
   573  				// send.
   574  				select {
   575  				case <-ctx.Done():
   576  					return
   577  				case messageChan <- m:
   578  				}
   579  			}
   580  		}
   581  	}()
   582  	return messageChan, nil
   583  }
   584  
   585  // convertSelector takes a backend.LogSelector, which contains raw names that
   586  // may or may not be valid, and converts them to an api.LogSelector proto. It
    587  // returns an error if a name cannot be resolved or does not refer to a container task.
   588  func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
   589  	// don't rely on swarmkit to resolve IDs, do it ourselves
   590  	swarmSelector := &swarmapi.LogSelector{}
   591  	for _, s := range selector.Services {
   592  		service, err := getService(ctx, cc, s, false)
   593  		if err != nil {
   594  			return nil, err
   595  		}
   596  		c := service.Spec.Task.GetContainer()
   597  		if c == nil {
   598  			return nil, errors.New("logs only supported on container tasks")
   599  		}
   600  		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
   601  	}
   602  	for _, t := range selector.Tasks {
   603  		task, err := getTask(ctx, cc, t)
   604  		if err != nil {
   605  			return nil, err
   606  		}
   607  		c := task.Spec.GetContainer()
   608  		if c == nil {
   609  			return nil, errors.New("logs only supported on container tasks")
   610  		}
   611  		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
   612  	}
   613  	return swarmSelector, nil
   614  }
   615  
   616  // imageWithDigestString takes an image such as name or name:tag
   617  // and returns the image pinned to a digest, such as name@sha256:34234
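         // If the reference is already pinned to a digest, no registry lookup is
         // performed; otherwise the registry (or a configured mirror) is queried
         // for the digest of the tag.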
   618  func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *registry.AuthConfig) (string, error) {
   619  	ref, err := reference.ParseAnyReference(image)
   620  	if err != nil {
   621  		return "", err
   622  	}
   623  	namedRef, ok := ref.(reference.Named)
   624  	if !ok {
   625  		if _, ok := ref.(reference.Digested); ok {
   626  			return image, nil
   627  		}
   628  		return "", errors.Errorf("unknown image reference format: %s", image)
   629  	}
   630  	// only query registry if not a canonical reference (i.e. with digest)
   631  	if _, ok := namedRef.(reference.Canonical); !ok {
   632  		namedRef = reference.TagNameOnly(namedRef)
   633  
   634  		taggedRef, ok := namedRef.(reference.NamedTagged)
   635  		if !ok {
   636  			return "", errors.Errorf("image reference not tagged: %s", image)
   637  		}
   638  
   639  		// Fetch the image manifest's digest; if a mirror is configured, try the
   640  		// mirror first, but continue with upstream on failure.
   641  		repos, err := c.config.ImageBackend.GetRepositories(ctx, taggedRef, authConfig)
   642  		if err != nil {
   643  			return "", err
   644  		}
   645  
   646  		var (
   647  			imgDigest digest.Digest
   648  			lastErr   error
   649  		)
   650  		for _, repo := range repos {
   651  			dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
   652  			if err != nil {
   653  				lastErr = err
   654  				continue
   655  			}
   656  			imgDigest = dscrptr.Digest
   657  		}
    658  		if lastErr != nil && imgDigest == "" {
   659  			return "", lastErr
   660  		}
   661  
   662  		namedDigestedRef, err := reference.WithDigest(taggedRef, imgDigest)
   663  		if err != nil {
   664  			return "", err
   665  		}
    666  		// return the familiar form until the interface is updated to return a reference type
   667  		return reference.FamiliarString(namedDigestedRef), nil
   668  	}
   669  	// reference already contains a digest, so just return it
   670  	return reference.FamiliarString(ref), nil
   671  }
   672  
   673  // digestWarning constructs a formatted warning string
   674  // using the image name that could not be pinned by digest. The
    675  // formatting is hardcoded, but could be made smarter in the future
   676  func digestWarning(image string) string {
   677  	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
   678  }