github.com/Prakhar-Agarwal-byte/moby@v0.0.0-20231027092010-a14e3e8ab87e/daemon/cluster/services.go

package cluster // import "github.com/Prakhar-Agarwal-byte/moby/daemon/cluster"

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/containerd/log"
	"github.com/distribution/reference"
	"github.com/Prakhar-Agarwal-byte/moby/api/types"
	"github.com/Prakhar-Agarwal-byte/moby/api/types/backend"
	"github.com/Prakhar-Agarwal-byte/moby/api/types/container"
	"github.com/Prakhar-Agarwal-byte/moby/api/types/registry"
	"github.com/Prakhar-Agarwal-byte/moby/api/types/swarm"
	timetypes "github.com/Prakhar-Agarwal-byte/moby/api/types/time"
	"github.com/Prakhar-Agarwal-byte/moby/daemon/cluster/convert"
	"github.com/Prakhar-Agarwal-byte/moby/errdefs"
	runconfigopts "github.com/Prakhar-Agarwal-byte/moby/runconfig/opts"
	gogotypes "github.com/gogo/protobuf/types"
	swarmapi "github.com/moby/swarmkit/v2/api"
	"github.com/pkg/errors"
	"google.golang.org/grpc"
)

// GetServices returns all services of a managed swarm cluster.
func (c *Cluster) GetServices(options types.ServiceListOptions) ([]swarm.Service, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	// The accepted-filter check lives here because the "mode" filter is
	// processed in the daemon, not in SwarmKit, so it is best kept in the
	// same file as the filter processing (the for loop below).
	accepted := map[string]bool{
		"name":    true,
		"id":      true,
		"label":   true,
		"mode":    true,
		"runtime": true,
	}
	if err := options.Filters.Validate(accepted); err != nil {
		return nil, err
	}

	if len(options.Filters.Get("runtime")) == 0 {
		// Default to using the container runtime filter
		options.Filters.Add("runtime", string(swarm.RuntimeContainer))
	}

	filters := &swarmapi.ListServicesRequest_Filters{
		NamePrefixes: options.Filters.Get("name"),
		IDPrefixes:   options.Filters.Get("id"),
		Labels:       runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
		Runtimes:     options.Filters.Get("runtime"),
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

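	// defaultRecvSizeForListResponse is defined elsewhere in this package;
	// it raises the gRPC receive limit above the library default (4 MiB)
	// so that listing services on a large cluster does not fail on
	// response-message size.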
	r, err := state.controlClient.ListServices(
		ctx,
		&swarmapi.ListServicesRequest{Filters: filters},
		grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
	)
	if err != nil {
		return nil, err
	}

	services := make([]swarm.Service, 0, len(r.Services))

	// if the user requests the service statuses, we'll store the IDs needed
	// in this slice
	var serviceIDs []string
	if options.Status {
		serviceIDs = make([]string, 0, len(r.Services))
	}
	for _, service := range r.Services {
		if options.Filters.Contains("mode") {
			var mode string
			switch service.Spec.GetMode().(type) {
			case *swarmapi.ServiceSpec_Global:
				mode = "global"
			case *swarmapi.ServiceSpec_Replicated:
				mode = "replicated"
			case *swarmapi.ServiceSpec_ReplicatedJob:
				mode = "replicated-job"
			case *swarmapi.ServiceSpec_GlobalJob:
				mode = "global-job"
			}

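			// a mode that matches none of the cases above leaves mode as
			// the empty string, so such a service is dropped by the
			// exact-match check below.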
			if !options.Filters.ExactMatch("mode", mode) {
				continue
			}
		}
		if options.Status {
			serviceIDs = append(serviceIDs, service.ID)
		}
		svcs, err := convert.ServiceFromGRPC(*service)
		if err != nil {
			return nil, err
		}
		services = append(services, svcs)
	}

	if options.Status {
		// Listing service statuses is a separate call because, while it is the
		// most common UI operation, it is still just a UI operation, and it
		// would be improper to include this data in swarm's Service object.
		// We pay the cost with some complexity here, but this is still way
		// more efficient than marshalling and unmarshalling all the JSON
		// needed to list tasks and get this data otherwise client-side
		resp, err := state.controlClient.ListServiceStatuses(
			ctx,
			&swarmapi.ListServiceStatusesRequest{Services: serviceIDs},
			grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
		)
		if err != nil {
			return nil, err
		}

		// we'll need to match up statuses in the response with the services in
		// the list operation. if we did this by operating on two lists, the
		// result would be quadratic. instead, make a mapping of service IDs to
		// service statuses so that this is roughly linear. additionally,
		// convert the status response to an engine api service status here.
		serviceMap := map[string]*swarm.ServiceStatus{}
		for _, status := range resp.Statuses {
			serviceMap[status.ServiceID] = &swarm.ServiceStatus{
				RunningTasks:   status.RunningTasks,
				DesiredTasks:   status.DesiredTasks,
				CompletedTasks: status.CompletedTasks,
			}
		}

		// because this is a list of values and not pointers, make sure we
		// actually alter the value when iterating.
		for i, service := range services {
			// the return value of the ListServiceStatuses operation is
			// guaranteed to contain a value in the response for every argument
			// in the request, so we can safely do this assignment. and even if
			// it wasn't, and the service ID was for some reason absent from
			// this map, the resulting value of service.Status would just be
			// nil -- the same thing it was before
			service.ServiceStatus = serviceMap[service.ID]
			services[i] = service
		}
	}

	return services, nil
}

// GetService returns a service based on an ID or name.
func (c *Cluster) GetService(input string, insertDefaults bool) (swarm.Service, error) {
	var service *swarmapi.Service
	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		s, err := getService(ctx, state.controlClient, input, insertDefaults)
		if err != nil {
			return err
		}
		service = s
		return nil
	}); err != nil {
		return swarm.Service{}, err
	}
	svc, err := convert.ServiceFromGRPC(*service)
	if err != nil {
		return swarm.Service{}, err
	}
	return svc, nil
}

// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s swarm.ServiceSpec, encodedAuth string, queryRegistry bool) (*swarm.ServiceCreateResponse, error) {
	var resp *swarm.ServiceCreateResponse
	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		err := c.populateNetworkID(ctx, state.controlClient, &s)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(s)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		resp = &swarm.ServiceCreateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Attachment:
			return fmt.Errorf("invalid task spec: spec type %q not supported", swarm.RuntimeNetworkAttachment)
		// handle other runtimes here
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(swarm.RuntimePlugin):
				if !c.config.Backend.HasExperimental() {
					return fmt.Errorf("runtime type %q only supported in experimental", swarm.RuntimePlugin)
				}
				if s.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}

			default:
				return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		case *swarmapi.TaskSpec_Container:
			ctnr := serviceSpec.Task.GetContainer()
			if ctnr == nil {
				return errors.New("service does not use container tasks")
			}
			if encodedAuth != "" {
				ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			}

			// retrieve auth config from encoded auth
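			// (encodedAuth is expected to be base64url-encoded JSON of a
			// registry.AuthConfig, the same format as the API's
			// X-Registry-Auth header; a decode failure is only logged,
			// since the config is needed only when querying the registry)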
			authConfig := &registry.AuthConfig{}
			if encodedAuth != "" {
				authReader := strings.NewReader(encodedAuth)
				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
				if err := dec.Decode(authConfig); err != nil {
					log.G(ctx).Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future: integration tests only use the
			// latest API version, so it is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
				if err != nil {
					log.G(ctx).Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
				} else if ctnr.Image != digestImage {
					log.G(ctx).Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
					ctnr.Image = digestImage
				} else {
					log.G(ctx).Debugf("creating service using supplied digest reference %s", ctnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause CreateService below to fail. Reusing
				// "ctx" could make it impossible to create a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		}
		return nil
	})

	return resp, err
}

// UpdateService updates an existing service to match new properties.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec swarm.ServiceSpec, flags types.ServiceUpdateOptions, queryRegistry bool) (*swarm.ServiceUpdateResponse, error) {
	var resp *swarm.ServiceUpdateResponse

	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		err := c.populateNetworkID(ctx, state.controlClient, &spec)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
		if err != nil {
			return err
		}

		resp = &swarm.ServiceUpdateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Attachment:
			return fmt.Errorf("invalid task spec: spec type %q not supported", swarm.RuntimeNetworkAttachment)
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(swarm.RuntimePlugin):
				if spec.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}
			}
		case *swarmapi.TaskSpec_Container:
			newCtnr := serviceSpec.Task.GetContainer()
			if newCtnr == nil {
				return errors.New("service does not use container tasks")
			}

			encodedAuth := flags.EncodedRegistryAuth
			if encodedAuth != "" {
				newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			} else {
				// this is needed because if the encodedAuth isn't being
				// updated then we shouldn't lose it, and should continue to
				// use the one that was already present
				var ctnr *swarmapi.ContainerSpec
				switch flags.RegistryAuthFrom {
				case types.RegistryAuthFromSpec, "":
					ctnr = currentService.Spec.Task.GetContainer()
				case types.RegistryAuthFromPreviousSpec:
					if currentService.PreviousSpec == nil {
						return errors.New("service does not have a previous spec")
					}
					ctnr = currentService.PreviousSpec.Task.GetContainer()
				default:
					return errors.New("unsupported registryAuthFrom value")
				}
				if ctnr == nil {
					return errors.New("service does not use container tasks")
				}
				newCtnr.PullOptions = ctnr.PullOptions
				// update encodedAuth so it can be used to pin image by digest
				if ctnr.PullOptions != nil {
					encodedAuth = ctnr.PullOptions.RegistryAuth
				}
			}

			// retrieve auth config from encoded auth
			authConfig := &registry.AuthConfig{}
			if encodedAuth != "" {
				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
					log.G(ctx).Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future: integration tests only use the
			// latest API version, so it is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
				if err != nil {
					log.G(ctx).Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
				} else if newCtnr.Image != digestImage {
					log.G(ctx).Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
					newCtnr.Image = digestImage
				} else {
					log.G(ctx).Debugf("updating service using supplied digest reference %s", newCtnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause UpdateService below to fail. Reusing
				// "ctx" could make it impossible to update a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}
		}

		var rollback swarmapi.UpdateServiceRequest_Rollback
		switch flags.Rollback {
		case "", "none":
			rollback = swarmapi.UpdateServiceRequest_NONE
		case "previous":
			rollback = swarmapi.UpdateServiceRequest_PREVIOUS
		default:
			return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
		}

		_, err = state.controlClient.UpdateService(
			ctx,
			&swarmapi.UpdateServiceRequest{
				ServiceID: currentService.ID,
				Spec:      &serviceSpec,
				ServiceVersion: &swarmapi.Version{
					Index: version,
				},
				Rollback: rollback,
			},
		)
		return err
	})
	return resp, err
}

// RemoveService removes a service from a managed swarm cluster.
func (c *Cluster) RemoveService(input string) error {
	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		service, err := getService(ctx, state.controlClient, input, false)
		if err != nil {
			return err
		}

		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
		return err
	})
}

// ServiceLogs collects service logs and returns them as a channel of log
// messages for the caller to consume.
func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *container.LogsOptions) (<-chan *backend.LogMessage, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
	if err != nil {
		return nil, errors.Wrap(err, "error making log selector")
	}

	// set the streams we'll use
	stdStreams := []swarmapi.LogStream{}
	if config.ShowStdout {
		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
	}
	if config.ShowStderr {
		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
	}

	// Get tail value squared away - the number of previous log lines we look at
	var tail int64
	// in ContainerLogs, if the tail value is anything non-integer, we just
	// set it to -1 (all). I don't agree with that, but passing no tail value
	// should still be legitimate: if you don't pass tail, we assume you want
	// "all"
	if config.Tail == "all" || config.Tail == "" {
		// tail of 0 means send all logs on the swarmkit side
		tail = 0
	} else {
		t, err := strconv.Atoi(config.Tail)
		if err != nil {
			return nil, errors.New(`tail value must be a positive integer or "all"`)
		}
		if t < 0 {
			return nil, errors.New("negative tail values not supported")
		}
		// we actually use negative tail in swarmkit to represent messages
		// backwards starting from the beginning. also, -1 means no logs. so,
		// basically, for api compat with docker container logs, add one and
		// flip the sign. we error above if you pass a negative tail, which
		// isn't supported by docker (and would error deeper in the stack
		// anyway)
		//
		// See the logs protobuf for more information
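		// e.g. a docker-side tail of 10 ("show the last 10 lines") maps to
		// a swarmkit tail of -11, and a docker-side tail of 0 maps to -1
		// ("no logs").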
		tail = int64(-(t + 1))
	}

	// get the since value - the time in the past we're looking at logs starting from
	var sinceProto *gogotypes.Timestamp
	if config.Since != "" {
		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse since timestamp")
		}
		since := time.Unix(s, n)
		sinceProto, err = gogotypes.TimestampProto(since)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse timestamp to proto")
		}
	}

	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: swarmSelector,
		Options: &swarmapi.LogSubscriptionOptions{
			Follow:  config.Follow,
			Streams: stdStreams,
			Tail:    tail,
			Since:   sinceProto,
		},
	})
	if err != nil {
		return nil, err
	}

	messageChan := make(chan *backend.LogMessage, 1)
	go func() {
		defer close(messageChan)
		for {
			// Check the context before doing anything.
			select {
			case <-ctx.Done():
				return
			default:
			}
			subscribeMsg, err := stream.Recv()
			if err == io.EOF {
				return
			}
			// if the error is anything other than io.EOF, push an error
			// message onto the channel and return
			if err != nil {
				select {
				case <-ctx.Done():
				case messageChan <- &backend.LogMessage{Err: err}:
				}
				return
			}

			for _, msg := range subscribeMsg.Messages {
				// make a new message
				m := new(backend.LogMessage)
				m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
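				// the +3 reserves capacity for the node, service, and task
				// context attributes appended below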
				// add the timestamp, recording the error if the conversion fails
				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
				if err != nil {
					m.Err = err
				}

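				// contextPrefix is a constant defined elsewhere in this
				// package; these form the context attribute keys, e.g.
				// "<contextPrefix>.node.id".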
				nodeKey := contextPrefix + ".node.id"
				serviceKey := contextPrefix + ".service.id"
				taskKey := contextPrefix + ".task.id"

				// copy over all of the details
				for _, d := range msg.Attrs {
					switch d.Key {
					case nodeKey, serviceKey, taskKey:
						// we have the final say over context details, in case
						// of a conflict (e.g. the user added a detail with a
						// context key for some reason)
					default:
						m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
					}
				}
				m.Attrs = append(m.Attrs,
					backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
					backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
					backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
				)

				switch msg.Stream {
				case swarmapi.LogStreamStdout:
					m.Source = "stdout"
				case swarmapi.LogStreamStderr:
					m.Source = "stderr"
				}
				m.Line = msg.Data

				// there could be a case where the reader stops accepting
				// messages and the context is canceled. we need to check that
				// here, or otherwise we risk blocking forever on the message
				// send.
				select {
				case <-ctx.Done():
					return
				case messageChan <- m:
				}
			}
		}
	}()
	return messageChan, nil
}

// convertSelector takes a backend.LogSelector, which contains raw names that
// may or may not be valid, and converts them to an api.LogSelector proto. It
// returns an error if any of the names cannot be resolved.
func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
	// don't rely on swarmkit to resolve IDs, do it ourselves
	swarmSelector := &swarmapi.LogSelector{}
	for _, s := range selector.Services {
		service, err := getService(ctx, cc, s, false)
		if err != nil {
			return nil, err
		}
		c := service.Spec.Task.GetContainer()
		if c == nil {
			return nil, errors.New("logs only supported on container tasks")
		}
		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
	}
	for _, t := range selector.Tasks {
		task, err := getTask(ctx, cc, t)
		if err != nil {
			return nil, err
		}
		c := task.Spec.GetContainer()
		if c == nil {
			return nil, errors.New("logs only supported on container tasks")
		}
		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
	}
	return swarmSelector, nil
}

// imageWithDigestString takes an image such as name or name:tag
// and returns the image pinned to a digest, such as name@sha256:34234
func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *registry.AuthConfig) (string, error) {
	ref, err := reference.ParseAnyReference(image)
	if err != nil {
		return "", err
	}
	namedRef, ok := ref.(reference.Named)
	if !ok {
		if _, ok := ref.(reference.Digested); ok {
			return image, nil
		}
		return "", errors.Errorf("unknown image reference format: %s", image)
	}
	// only query registry if not a canonical reference (i.e. with digest)
	if _, ok := namedRef.(reference.Canonical); !ok {
		namedRef = reference.TagNameOnly(namedRef)
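		// reference.TagNameOnly applies the default "latest" tag to a bare
		// name, so the NamedTagged assertion below should always succeed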

		taggedRef, ok := namedRef.(reference.NamedTagged)
		if !ok {
			return "", errors.Errorf("image reference not tagged: %s", image)
		}

		repo, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig)
		if err != nil {
			return "", err
		}
		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
		if err != nil {
			return "", err
		}

		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
		if err != nil {
			return "", err
		}
		// return the familiar form until the interface is updated to return
		// a typed reference
		return reference.FamiliarString(namedDigestedRef), nil
	}
	// reference already contains a digest, so just return it
	return reference.FamiliarString(ref), nil
}

// digestWarning constructs a formatted warning string using the image name
// that could not be pinned by digest. The formatting is hardcoded, but could
// be made smarter in the future
func digestWarning(image string) string {
	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
}