github.com/zhouyu0/docker-note@v0.0.0-20190722021225-b8d3825084db/daemon/cluster/services.go

     1  package cluster // import "github.com/docker/docker/daemon/cluster"
     2  
     3  import (
     4  	"context"
     5  	"encoding/base64"
     6  	"encoding/json"
     7  	"fmt"
     8  	"io"
     9  	"os"
    10  	"strconv"
    11  	"strings"
    12  	"time"
    13  
    14  	"github.com/docker/distribution/reference"
    15  	apitypes "github.com/docker/docker/api/types"
    16  	"github.com/docker/docker/api/types/backend"
    17  	types "github.com/docker/docker/api/types/swarm"
    18  	timetypes "github.com/docker/docker/api/types/time"
    19  	"github.com/docker/docker/daemon/cluster/convert"
    20  	"github.com/docker/docker/errdefs"
    21  	runconfigopts "github.com/docker/docker/runconfig/opts"
    22  	swarmapi "github.com/docker/swarmkit/api"
    23  	gogotypes "github.com/gogo/protobuf/types"
    24  	"github.com/pkg/errors"
    25  	"github.com/sirupsen/logrus"
    26  	"google.golang.org/grpc"
    27  )
    28  
    29  // GetServices returns all services of a managed swarm cluster.
    30  func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
    31  	c.mu.RLock()
    32  	defer c.mu.RUnlock()
    33  
    34  	state := c.currentNodeState()
    35  	if !state.IsActiveManager() {
    36  		return nil, c.errNoManager(state)
    37  	}
    38  
     39  	// The accepted-filter check lives here because the "mode"
     40  	// filter is processed in the daemon, not in SwarmKit, so it
     41  	// belongs in the same file as the filter processing (the for
     42  	// loop below).
    43  	accepted := map[string]bool{
    44  		"name":    true,
    45  		"id":      true,
    46  		"label":   true,
    47  		"mode":    true,
    48  		"runtime": true,
    49  	}
    50  	if err := options.Filters.Validate(accepted); err != nil {
    51  		return nil, err
    52  	}
    53  
    54  	if len(options.Filters.Get("runtime")) == 0 {
    55  		// Default to using the container runtime filter
    56  		options.Filters.Add("runtime", string(types.RuntimeContainer))
    57  	}
    58  
    59  	filters := &swarmapi.ListServicesRequest_Filters{
    60  		NamePrefixes: options.Filters.Get("name"),
    61  		IDPrefixes:   options.Filters.Get("id"),
    62  		Labels:       runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
    63  		Runtimes:     options.Filters.Get("runtime"),
    64  	}
    65  
    66  	ctx, cancel := c.getRequestContext()
    67  	defer cancel()
    68  
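         	// MaxCallRecvMsgSize overrides the gRPC receive limit for this call
         	// so large service lists can be returned (see defaultRecvSizeForListResponse).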
    69  	r, err := state.controlClient.ListServices(
    70  		ctx,
    71  		&swarmapi.ListServicesRequest{Filters: filters},
    72  		grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
    73  	)
    74  	if err != nil {
    75  		return nil, err
    76  	}
    77  
    78  	services := make([]types.Service, 0, len(r.Services))
    79  
    80  	for _, service := range r.Services {
    81  		if options.Filters.Contains("mode") {
    82  			var mode string
    83  			switch service.Spec.GetMode().(type) {
    84  			case *swarmapi.ServiceSpec_Global:
    85  				mode = "global"
    86  			case *swarmapi.ServiceSpec_Replicated:
    87  				mode = "replicated"
    88  			}
    89  
    90  			if !options.Filters.ExactMatch("mode", mode) {
    91  				continue
    92  			}
    93  		}
    94  		svcs, err := convert.ServiceFromGRPC(*service)
    95  		if err != nil {
    96  			return nil, err
    97  		}
    98  		services = append(services, svcs)
    99  	}
   100  
   101  	return services, nil
   102  }
   103  
   104  // GetService returns a service based on an ID or name.
   105  func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) {
   106  	var service *swarmapi.Service
   107  	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   108  		s, err := getService(ctx, state.controlClient, input, insertDefaults)
   109  		if err != nil {
   110  			return err
   111  		}
   112  		service = s
   113  		return nil
   114  	}); err != nil {
   115  		return types.Service{}, err
   116  	}
   117  	svc, err := convert.ServiceFromGRPC(*service)
   118  	if err != nil {
   119  		return types.Service{}, err
   120  	}
   121  	return svc, nil
   122  }
   123  
   124  // CreateService creates a new service in a managed swarm cluster.
   125  func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) {
   126  	var resp *apitypes.ServiceCreateResponse
   127  	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   128  		err := c.populateNetworkID(ctx, state.controlClient, &s)
   129  		if err != nil {
   130  			return err
   131  		}
   132  
   133  		serviceSpec, err := convert.ServiceSpecToGRPC(s)
   134  		if err != nil {
   135  			return errdefs.InvalidParameter(err)
   136  		}
   137  
   138  		resp = &apitypes.ServiceCreateResponse{}
   139  
   140  		switch serviceSpec.Task.Runtime.(type) {
   141  		case *swarmapi.TaskSpec_Attachment:
   142  			return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment)
   143  		// handle other runtimes here
   144  		case *swarmapi.TaskSpec_Generic:
   145  			switch serviceSpec.Task.GetGeneric().Kind {
   146  			case string(types.RuntimePlugin):
   147  				info, _ := c.config.Backend.SystemInfo()
   148  				if !info.ExperimentalBuild {
   149  					return fmt.Errorf("runtime type %q only supported in experimental", types.RuntimePlugin)
   150  				}
   151  				if s.TaskTemplate.PluginSpec == nil {
   152  					return errors.New("plugin spec must be set")
   153  				}
   154  
   155  			default:
   156  				return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
   157  			}
   158  
   159  			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
   160  			if err != nil {
   161  				return err
   162  			}
   163  
   164  			resp.ID = r.Service.ID
   165  		case *swarmapi.TaskSpec_Container:
   166  			ctnr := serviceSpec.Task.GetContainer()
   167  			if ctnr == nil {
   168  				return errors.New("service does not use container tasks")
   169  			}
   170  			if encodedAuth != "" {
   171  				ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
   172  			}
   173  
   174  			// retrieve auth config from encoded auth
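         			// encodedAuth is expected to be base64url-encoded JSON, typically
         			// the value of the X-Registry-Auth request header.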
   175  			authConfig := &apitypes.AuthConfig{}
   176  			if encodedAuth != "" {
   177  				authReader := strings.NewReader(encodedAuth)
   178  				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
   179  				if err := dec.Decode(authConfig); err != nil {
   180  					logrus.Warnf("invalid authconfig: %v", err)
   181  				}
   182  			}
   183  
   184  			// pin image by digest for API versions < 1.30
    185  			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
    186  			// should be removed in the future, since integration tests only use
    187  			// the latest API version and this check is no longer required.
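         			// e.g. an image reference such as "name:tag" is resolved to
         			// "name@sha256:<digest>" so every node runs the same image.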
   188  			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
   189  				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
   190  				if err != nil {
   191  					logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
   192  					// warning in the client response should be concise
   193  					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
   194  
   195  				} else if ctnr.Image != digestImage {
   196  					logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
   197  					ctnr.Image = digestImage
   198  
   199  				} else {
   200  					logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
   201  
   202  				}
   203  
   204  				// Replace the context with a fresh one.
   205  				// If we timed out while communicating with the
   206  				// registry, then "ctx" will already be expired, which
    207  				// would cause CreateService below to fail. Reusing
   208  				// "ctx" could make it impossible to create a service
   209  				// if the registry is slow or unresponsive.
   210  				var cancel func()
   211  				ctx, cancel = c.getRequestContext()
   212  				defer cancel()
   213  			}
   214  
   215  			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
   216  			if err != nil {
   217  				return err
   218  			}
   219  
   220  			resp.ID = r.Service.ID
   221  		}
   222  		return nil
   223  	})
   224  
   225  	return resp, err
   226  }
   227  
    228  // UpdateService updates an existing service to match the new properties.
   229  func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) {
   230  	var resp *apitypes.ServiceUpdateResponse
   231  
   232  	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   233  
   234  		err := c.populateNetworkID(ctx, state.controlClient, &spec)
   235  		if err != nil {
   236  			return err
   237  		}
   238  
   239  		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
   240  		if err != nil {
   241  			return errdefs.InvalidParameter(err)
   242  		}
   243  
   244  		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
   245  		if err != nil {
   246  			return err
   247  		}
   248  
   249  		resp = &apitypes.ServiceUpdateResponse{}
   250  
   251  		switch serviceSpec.Task.Runtime.(type) {
   252  		case *swarmapi.TaskSpec_Attachment:
   253  			return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment)
   254  		case *swarmapi.TaskSpec_Generic:
   255  			switch serviceSpec.Task.GetGeneric().Kind {
   256  			case string(types.RuntimePlugin):
   257  				if spec.TaskTemplate.PluginSpec == nil {
   258  					return errors.New("plugin spec must be set")
   259  				}
   260  			}
   261  		case *swarmapi.TaskSpec_Container:
   262  			newCtnr := serviceSpec.Task.GetContainer()
   263  			if newCtnr == nil {
   264  				return errors.New("service does not use container tasks")
   265  			}
   266  
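         			// Work out which registry auth to use for this update: prefer the
         			// auth supplied with the update, otherwise carry over PullOptions
         			// from the current or previous spec per flags.RegistryAuthFrom.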
   267  			encodedAuth := flags.EncodedRegistryAuth
   268  			if encodedAuth != "" {
   269  				newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
   270  			} else {
    271  				// this is needed because, if the encodedAuth isn't being updated,
    272  				// we shouldn't lose it; keep using the one already present
   273  				var ctnr *swarmapi.ContainerSpec
   274  				switch flags.RegistryAuthFrom {
   275  				case apitypes.RegistryAuthFromSpec, "":
   276  					ctnr = currentService.Spec.Task.GetContainer()
   277  				case apitypes.RegistryAuthFromPreviousSpec:
   278  					if currentService.PreviousSpec == nil {
   279  						return errors.New("service does not have a previous spec")
   280  					}
   281  					ctnr = currentService.PreviousSpec.Task.GetContainer()
   282  				default:
   283  					return errors.New("unsupported registryAuthFrom value")
   284  				}
   285  				if ctnr == nil {
   286  					return errors.New("service does not use container tasks")
   287  				}
   288  				newCtnr.PullOptions = ctnr.PullOptions
   289  				// update encodedAuth so it can be used to pin image by digest
   290  				if ctnr.PullOptions != nil {
   291  					encodedAuth = ctnr.PullOptions.RegistryAuth
   292  				}
   293  			}
   294  
   295  			// retrieve auth config from encoded auth
   296  			authConfig := &apitypes.AuthConfig{}
   297  			if encodedAuth != "" {
   298  				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
   299  					logrus.Warnf("invalid authconfig: %v", err)
   300  				}
   301  			}
   302  
   303  			// pin image by digest for API versions < 1.30
    304  			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
    305  			// should be removed in the future, since integration tests only use
    306  			// the latest API version and this check is no longer required.
   307  			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
   308  				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
   309  				if err != nil {
   310  					logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
   311  					// warning in the client response should be concise
   312  					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
   313  				} else if newCtnr.Image != digestImage {
   314  					logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
   315  					newCtnr.Image = digestImage
   316  				} else {
   317  					logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
   318  				}
   319  
   320  				// Replace the context with a fresh one.
   321  				// If we timed out while communicating with the
   322  				// registry, then "ctx" will already be expired, which
   323  				// would cause UpdateService below to fail. Reusing
   324  				// "ctx" could make it impossible to update a service
   325  				// if the registry is slow or unresponsive.
   326  				var cancel func()
   327  				ctx, cancel = c.getRequestContext()
   328  				defer cancel()
   329  			}
   330  		}
   331  
   332  		var rollback swarmapi.UpdateServiceRequest_Rollback
   333  		switch flags.Rollback {
   334  		case "", "none":
   335  			rollback = swarmapi.UpdateServiceRequest_NONE
   336  		case "previous":
   337  			rollback = swarmapi.UpdateServiceRequest_PREVIOUS
   338  		default:
   339  			return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
   340  		}
   341  
   342  		_, err = state.controlClient.UpdateService(
   343  			ctx,
   344  			&swarmapi.UpdateServiceRequest{
   345  				ServiceID: currentService.ID,
   346  				Spec:      &serviceSpec,
   347  				ServiceVersion: &swarmapi.Version{
   348  					Index: version,
   349  				},
   350  				Rollback: rollback,
   351  			},
   352  		)
   353  		return err
   354  	})
   355  	return resp, err
   356  }
   357  
   358  // RemoveService removes a service from a managed swarm cluster.
   359  func (c *Cluster) RemoveService(input string) error {
   360  	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   361  		service, err := getService(ctx, state.controlClient, input, false)
   362  		if err != nil {
   363  			return err
   364  		}
   365  
   366  		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
   367  		return err
   368  	})
   369  }
   370  
    371  // ServiceLogs collects service logs and returns them as a channel of log messages.
   372  func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
   373  	c.mu.RLock()
   374  	defer c.mu.RUnlock()
   375  
   376  	state := c.currentNodeState()
   377  	if !state.IsActiveManager() {
   378  		return nil, c.errNoManager(state)
   379  	}
   380  
   381  	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
   382  	if err != nil {
   383  		return nil, errors.Wrap(err, "error making log selector")
   384  	}
   385  
   386  	// set the streams we'll use
   387  	stdStreams := []swarmapi.LogStream{}
   388  	if config.ShowStdout {
   389  		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
   390  	}
   391  	if config.ShowStderr {
   392  		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
   393  	}
   394  
   395  	// Get tail value squared away - the number of previous log lines we look at
   396  	var tail int64
    397  	// in ContainerLogs, any non-integer tail value is silently treated as -1
    398  	// (all). we are stricter here, but omitting the tail value is still
    399  	// legitimate: if you don't pass tail, we assume you want "all"
   400  	if config.Tail == "all" || config.Tail == "" {
   401  		// tail of 0 means send all logs on the swarmkit side
   402  		tail = 0
   403  	} else {
   404  		t, err := strconv.Atoi(config.Tail)
   405  		if err != nil {
   406  			return nil, errors.New("tail value must be a positive integer or \"all\"")
   407  		}
   408  		if t < 0 {
   409  			return nil, errors.New("negative tail values not supported")
   410  		}
    411  		// we actually use a negative tail in swarmkit to represent messages
    412  		// backwards starting from the beginning. also, -1 means no logs. so,
    413  		// basically, for api compat with docker container logs, add one and
    414  		// flip the sign. we error above if you pass a negative tail, which
    415  		// isn't supported by docker (and would error deeper in the stack
    416  		// anyway)
   417  		//
   418  		// See the logs protobuf for more information
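         		// e.g. a requested tail of 5 becomes Tail: -6 in the
         		// subscription options below.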
   419  		tail = int64(-(t + 1))
   420  	}
   421  
   422  	// get the since value - the time in the past we're looking at logs starting from
   423  	var sinceProto *gogotypes.Timestamp
   424  	if config.Since != "" {
   425  		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
   426  		if err != nil {
   427  			return nil, errors.Wrap(err, "could not parse since timestamp")
   428  		}
   429  		since := time.Unix(s, n)
   430  		sinceProto, err = gogotypes.TimestampProto(since)
   431  		if err != nil {
   432  			return nil, errors.Wrap(err, "could not parse timestamp to proto")
   433  		}
   434  	}
   435  
   436  	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
   437  		Selector: swarmSelector,
   438  		Options: &swarmapi.LogSubscriptionOptions{
   439  			Follow:  config.Follow,
   440  			Streams: stdStreams,
   441  			Tail:    tail,
   442  			Since:   sinceProto,
   443  		},
   444  	})
   445  	if err != nil {
   446  		return nil, err
   447  	}
   448  
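         	// Adapt the gRPC log stream to the channel-based interface the logs
         	// backend expects; the goroutine exits when the stream ends, an error
         	// occurs, or the context is cancelled.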
   449  	messageChan := make(chan *backend.LogMessage, 1)
   450  	go func() {
   451  		defer close(messageChan)
   452  		for {
   453  			// Check the context before doing anything.
   454  			select {
   455  			case <-ctx.Done():
   456  				return
   457  			default:
   458  			}
   459  			subscribeMsg, err := stream.Recv()
   460  			if err == io.EOF {
   461  				return
   462  			}
    463  			// on any error other than io.EOF, push the error in and return
   464  			if err != nil {
   465  				select {
   466  				case <-ctx.Done():
   467  				case messageChan <- &backend.LogMessage{Err: err}:
   468  				}
   469  				return
   470  			}
   471  
   472  			for _, msg := range subscribeMsg.Messages {
   473  				// make a new message
   474  				m := new(backend.LogMessage)
   475  				m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
   476  				// add the timestamp, adding the error if it fails
   477  				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
   478  				if err != nil {
   479  					m.Err = err
   480  				}
   481  
   482  				nodeKey := contextPrefix + ".node.id"
   483  				serviceKey := contextPrefix + ".service.id"
   484  				taskKey := contextPrefix + ".task.id"
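         				// contextPrefix is defined elsewhere in this package; these
         				// keys record which node, service, and task produced each line.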
   485  
   486  				// copy over all of the details
   487  				for _, d := range msg.Attrs {
   488  					switch d.Key {
   489  					case nodeKey, serviceKey, taskKey:
    490  						// we have the final say over context details: if the user
    491  						// added a detail that collides with one of these context
    492  						// keys, skip it here; the context values appended below win
   493  					default:
   494  						m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
   495  					}
   496  				}
   497  				m.Attrs = append(m.Attrs,
   498  					backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
   499  					backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
   500  					backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
   501  				)
   502  
   503  				switch msg.Stream {
   504  				case swarmapi.LogStreamStdout:
   505  					m.Source = "stdout"
   506  				case swarmapi.LogStreamStderr:
   507  					m.Source = "stderr"
   508  				}
   509  				m.Line = msg.Data
   510  
   511  				// there could be a case where the reader stops accepting
   512  				// messages and the context is canceled. we need to check that
   513  				// here, or otherwise we risk blocking forever on the message
   514  				// send.
   515  				select {
   516  				case <-ctx.Done():
   517  					return
   518  				case messageChan <- m:
   519  				}
   520  			}
   521  		}
   522  	}()
   523  	return messageChan, nil
   524  }
   525  
   526  // convertSelector takes a backend.LogSelector, which contains raw names that
   527  // may or may not be valid, and converts them to an api.LogSelector proto. It
    528  // returns an error if a name cannot be resolved or does not use container tasks.
   529  func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
   530  	// don't rely on swarmkit to resolve IDs, do it ourselves
   531  	swarmSelector := &swarmapi.LogSelector{}
   532  	for _, s := range selector.Services {
   533  		service, err := getService(ctx, cc, s, false)
   534  		if err != nil {
   535  			return nil, err
   536  		}
   537  		c := service.Spec.Task.GetContainer()
   538  		if c == nil {
   539  			return nil, errors.New("logs only supported on container tasks")
   540  		}
   541  		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
   542  	}
   543  	for _, t := range selector.Tasks {
   544  		task, err := getTask(ctx, cc, t)
   545  		if err != nil {
   546  			return nil, err
   547  		}
   548  		c := task.Spec.GetContainer()
   549  		if c == nil {
   550  			return nil, errors.New("logs only supported on container tasks")
   551  		}
   552  		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
   553  	}
   554  	return swarmSelector, nil
   555  }
   556  
   557  // imageWithDigestString takes an image such as name or name:tag
   558  // and returns the image pinned to a digest, such as name@sha256:34234
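         // If the reference already contains a digest, it is returned unchanged
         // without querying the registry.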
   559  func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
   560  	ref, err := reference.ParseAnyReference(image)
   561  	if err != nil {
   562  		return "", err
   563  	}
   564  	namedRef, ok := ref.(reference.Named)
   565  	if !ok {
   566  		if _, ok := ref.(reference.Digested); ok {
   567  			return image, nil
   568  		}
   569  		return "", errors.Errorf("unknown image reference format: %s", image)
   570  	}
   571  	// only query registry if not a canonical reference (i.e. with digest)
   572  	if _, ok := namedRef.(reference.Canonical); !ok {
   573  		namedRef = reference.TagNameOnly(namedRef)
   574  
   575  		taggedRef, ok := namedRef.(reference.NamedTagged)
   576  		if !ok {
   577  			return "", errors.Errorf("image reference not tagged: %s", image)
   578  		}
   579  
   580  		repo, _, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig)
   581  		if err != nil {
   582  			return "", err
   583  		}
   584  		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
   585  		if err != nil {
   586  			return "", err
   587  		}
   588  
   589  		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
   590  		if err != nil {
   591  			return "", err
   592  		}
   593  		// return familiar form until interface updated to return type
   594  		return reference.FamiliarString(namedDigestedRef), nil
   595  	}
   596  	// reference already contains a digest, so just return it
   597  	return reference.FamiliarString(ref), nil
   598  }
   599  
   600  // digestWarning constructs a formatted warning string
   601  // using the image name that could not be pinned by digest. The
    602  // formatting is hardcoded, but could be made smarter in the future
   603  func digestWarning(image string) string {
   604  	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
   605  }