github.com/rhatdan/docker@v0.7.7-0.20180119204836-47a0dcbcd20a/daemon/cluster/services.go

package cluster

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/docker/distribution/reference"
	apitypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	types "github.com/docker/docker/api/types/swarm"
	timetypes "github.com/docker/docker/api/types/time"
	"github.com/docker/docker/daemon/cluster/convert"
	"github.com/docker/docker/errdefs"
	runconfigopts "github.com/docker/docker/runconfig/opts"
	swarmapi "github.com/docker/swarmkit/api"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

// GetServices returns all services of a managed swarm cluster.
func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	// The accepted-filter check lives here because the "mode" filter
	// is processed in the daemon, not in SwarmKit, so it is best kept
	// in the same file as the filter processing (in the for loop
	// below).
	accepted := map[string]bool{
		"name":    true,
		"id":      true,
		"label":   true,
		"mode":    true,
		"runtime": true,
	}
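	// For example, `docker service ls --filter mode=global` reaches this
	// code path: "mode" is validated here but applied in the loop below,
	// while the remaining filters are handed to SwarmKit.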
	if err := options.Filters.Validate(accepted); err != nil {
		return nil, err
	}

	if len(options.Filters.Get("runtime")) == 0 {
		// Default to using the container runtime filter
		options.Filters.Add("runtime", string(types.RuntimeContainer))
	}

	filters := &swarmapi.ListServicesRequest_Filters{
		NamePrefixes: options.Filters.Get("name"),
		IDPrefixes:   options.Filters.Get("id"),
		Labels:       runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
		Runtimes:     options.Filters.Get("runtime"),
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := state.controlClient.ListServices(
		ctx,
		&swarmapi.ListServicesRequest{Filters: filters})
	if err != nil {
		return nil, err
	}

	services := make([]types.Service, 0, len(r.Services))

	for _, service := range r.Services {
		if options.Filters.Contains("mode") {
			var mode string
			switch service.Spec.GetMode().(type) {
			case *swarmapi.ServiceSpec_Global:
				mode = "global"
			case *swarmapi.ServiceSpec_Replicated:
				mode = "replicated"
			}

			if !options.Filters.ExactMatch("mode", mode) {
				continue
			}
		}
		svcs, err := convert.ServiceFromGRPC(*service)
		if err != nil {
			return nil, err
		}
		services = append(services, svcs)
	}

	return services, nil
}

// GetService returns a service based on an ID or name.
func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) {
	var service *swarmapi.Service
	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		s, err := getService(ctx, state.controlClient, input, insertDefaults)
		if err != nil {
			return err
		}
		service = s
		return nil
	}); err != nil {
		return types.Service{}, err
	}
	svc, err := convert.ServiceFromGRPC(*service)
	if err != nil {
		return types.Service{}, err
	}
	return svc, nil
}

// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) {
	var resp *apitypes.ServiceCreateResponse
	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		err := c.populateNetworkID(ctx, state.controlClient, &s)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(s)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		resp = &apitypes.ServiceCreateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		// handle other runtimes here
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(types.RuntimePlugin):
				info, _ := c.config.Backend.SystemInfo()
				if !info.ExperimentalBuild {
					return fmt.Errorf("runtime type %q only supported in experimental", types.RuntimePlugin)
				}
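				// (experimental mode is enabled with `dockerd --experimental`
				// or with "experimental": true in daemon.json)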
				if s.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}

			default:
				return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		case *swarmapi.TaskSpec_Container:
			ctnr := serviceSpec.Task.GetContainer()
			if ctnr == nil {
				return errors.New("service does not use container tasks")
			}
			if encodedAuth != "" {
				ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			}

			// retrieve auth config from encoded auth
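			// (encodedAuth carries the client's X-Registry-Auth header: a
			// base64url-encoded JSON AuthConfig; an illustrative example:
			//   {"username":"jane","password":"...","serveraddress":"registry.example.com:5000"})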
			authConfig := &apitypes.AuthConfig{}
			if encodedAuth != "" {
				authReader := strings.NewReader(encodedAuth)
				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
				if err := dec.Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): the check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future; integration tests only use the
			// latest API version, so it is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
				if err != nil {
					logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
				} else if ctnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
					ctnr.Image = digestImage
				} else {
					logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
				}
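				// At this point an image given as e.g. "nginx:latest" has
				// been rewritten to a pinned form along the lines of
				// "nginx:latest@sha256:<digest>", so every node resolves
				// the exact same image.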

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause CreateService below to fail. Reusing
				// "ctx" could make it impossible to create a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		}
		return nil
	})

	return resp, err
}

// UpdateService updates an existing service to match new properties.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) {
	var resp *apitypes.ServiceUpdateResponse

	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		err := c.populateNetworkID(ctx, state.controlClient, &spec)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
		if err != nil {
			return err
		}

		resp = &apitypes.ServiceUpdateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(types.RuntimePlugin):
				if spec.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}
			}
		case *swarmapi.TaskSpec_Container:
			newCtnr := serviceSpec.Task.GetContainer()
			if newCtnr == nil {
				return errors.New("service does not use container tasks")
			}

			encodedAuth := flags.EncodedRegistryAuth
			if encodedAuth != "" {
				newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			} else {
				// If encodedAuth isn't being updated we shouldn't lose
				// it: keep using the auth that is already present.
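				// flags.RegistryAuthFrom mirrors the client's
				// `registryAuthFrom` query parameter and selects which
				// stored spec to copy the auth from: "spec" (the
				// default) or "previous-spec".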
				var ctnr *swarmapi.ContainerSpec
				switch flags.RegistryAuthFrom {
				case apitypes.RegistryAuthFromSpec, "":
					ctnr = currentService.Spec.Task.GetContainer()
				case apitypes.RegistryAuthFromPreviousSpec:
					if currentService.PreviousSpec == nil {
						return errors.New("service does not have a previous spec")
					}
					ctnr = currentService.PreviousSpec.Task.GetContainer()
				default:
					return errors.New("unsupported registryAuthFrom value")
				}
				if ctnr == nil {
					return errors.New("service does not use container tasks")
				}
				newCtnr.PullOptions = ctnr.PullOptions
				// update encodedAuth so it can be used to pin image by digest
				if ctnr.PullOptions != nil {
					encodedAuth = ctnr.PullOptions.RegistryAuth
				}
			}

			// retrieve auth config from encoded auth
			authConfig := &apitypes.AuthConfig{}
			if encodedAuth != "" {
				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): the check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future; integration tests only use the
			// latest API version, so it is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
				if err != nil {
					logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
				} else if newCtnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
					newCtnr.Image = digestImage
				} else {
					logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause UpdateService below to fail. Reusing
				// "ctx" could make it impossible to update a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}
		}

		var rollback swarmapi.UpdateServiceRequest_Rollback
		switch flags.Rollback {
		case "", "none":
			rollback = swarmapi.UpdateServiceRequest_NONE
		case "previous":
			rollback = swarmapi.UpdateServiceRequest_PREVIOUS
		default:
			return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
		}
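		// "previous" is the value the docker CLI sends for
		// `docker service update --rollback <service>`; it tells
		// SwarmKit to restore the service's PreviousSpec.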

		_, err = state.controlClient.UpdateService(
			ctx,
			&swarmapi.UpdateServiceRequest{
				ServiceID: currentService.ID,
				Spec:      &serviceSpec,
				ServiceVersion: &swarmapi.Version{
					Index: version,
				},
				Rollback: rollback,
			},
		)
		return err
	})
	return resp, err
}

// RemoveService removes a service from a managed swarm cluster.
func (c *Cluster) RemoveService(input string) error {
	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		service, err := getService(ctx, state.controlClient, input, false)
		if err != nil {
			return err
		}

		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
		return err
	})
}

// ServiceLogs collects service logs and sends them back on the returned
// channel of log messages.
func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
	if err != nil {
		return nil, errors.Wrap(err, "error making log selector")
	}

	// set the streams we'll use
	stdStreams := []swarmapi.LogStream{}
	if config.ShowStdout {
		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
	}
	if config.ShowStderr {
		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
	}

	// Get tail value squared away - the number of previous log lines we look at
	var tail int64
	// In ContainerLogs, any non-integer tail value is silently coerced to
	// -1 (all). We are stricter here and reject bad values, but an absent
	// tail value is still legitimate: if you don't pass tail, we assume
	// you want "all".
	if config.Tail == "all" || config.Tail == "" {
		// tail of 0 means send all logs on the swarmkit side
		tail = 0
	} else {
		t, err := strconv.Atoi(config.Tail)
		if err != nil {
			return nil, errors.New("tail value must be a positive integer or \"all\"")
		}
		if t < 0 {
			return nil, errors.New("negative tail values not supported")
		}
		// SwarmKit uses negative tail values to mean "this many messages
		// back from the end", and -1 means no logs at all. So, for API
		// compatibility with docker container logs, add one and flip the
		// sign. Negative tail values were rejected above; docker doesn't
		// support them (and they would error deeper in the stack anyway).
		//
		// See the logs protobuf for more information
		tail = int64(-(t + 1))
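		// e.g. a client tail of 10 becomes -11 here, which swarmkit reads
		// as "the last 10 messages", while a client tail of 0 becomes -1,
		// i.e. no past logs at all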
	}

	// get the since value - the time in the past we're looking at logs starting from
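	// config.Since is a Unix timestamp with optional fractional seconds,
	// e.g. "1516394916.000000001"; clients such as `docker logs --since`
	// convert human-readable forms into this before calling the API.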
	var sinceProto *gogotypes.Timestamp
	if config.Since != "" {
		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse since timestamp")
		}
		since := time.Unix(s, n)
		sinceProto, err = gogotypes.TimestampProto(since)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse timestamp to proto")
		}
	}

	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: swarmSelector,
		Options: &swarmapi.LogSubscriptionOptions{
			Follow:  config.Follow,
			Streams: stdStreams,
			Tail:    tail,
			Since:   sinceProto,
		},
	})
	if err != nil {
		return nil, err
	}

	messageChan := make(chan *backend.LogMessage, 1)
	go func() {
		defer close(messageChan)
		for {
			// Check the context before doing anything.
			select {
			case <-ctx.Done():
				return
			default:
			}
			subscribeMsg, err := stream.Recv()
			if err == io.EOF {
				return
			}
			// on any other error, push an error message and return
			if err != nil {
				select {
				case <-ctx.Done():
				case messageChan <- &backend.LogMessage{Err: err}:
				}
				return
			}

			for _, msg := range subscribeMsg.Messages {
				// make a new message
				m := new(backend.LogMessage)
				m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
				// add the timestamp, adding the error if it fails
				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
				if err != nil {
					m.Err = err
				}

				nodeKey := contextPrefix + ".node.id"
				serviceKey := contextPrefix + ".service.id"
				taskKey := contextPrefix + ".task.id"
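				// (with the package's contextPrefix these become attrs
				// such as "com.docker.swarm.node.id" on each message)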

				// copy over all of the details
				for _, d := range msg.Attrs {
					switch d.Key {
					case nodeKey, serviceKey, taskKey:
						// drop user-supplied details that collide with a
						// context key; the context values appended below
						// have the final say
					default:
						m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
					}
				}
				m.Attrs = append(m.Attrs,
					backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
					backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
					backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
				)

				switch msg.Stream {
				case swarmapi.LogStreamStdout:
					m.Source = "stdout"
				case swarmapi.LogStreamStderr:
					m.Source = "stderr"
				}
				m.Line = msg.Data

				// there could be a case where the reader stops accepting
				// messages and the context is canceled. we need to check
				// that here, or otherwise we risk blocking forever on the
				// message send.
				select {
				case <-ctx.Done():
					return
				case messageChan <- m:
				}
			}
		}
	}()
	return messageChan, nil
}

// convertSelector takes a backend.LogSelector, which contains raw names that
// may or may not be valid, and converts them to an api.LogSelector proto. It
// returns an error if something fails.
func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
	// don't rely on swarmkit to resolve IDs, do it ourselves
	swarmSelector := &swarmapi.LogSelector{}
	for _, s := range selector.Services {
		service, err := getService(ctx, cc, s, false)
		if err != nil {
			return nil, err
		}
		c := service.Spec.Task.GetContainer()
		if c == nil {
			return nil, errors.New("logs only supported on container tasks")
		}
		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
	}
	for _, t := range selector.Tasks {
		task, err := getTask(ctx, cc, t)
		if err != nil {
			return nil, err
		}
		c := task.Spec.GetContainer()
		if c == nil {
			return nil, errors.New("logs only supported on container tasks")
		}
		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
	}
	return swarmSelector, nil
}

// imageWithDigestString takes an image such as name or name:tag
// and returns the image pinned to a digest, such as name@sha256:34234
func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
	ref, err := reference.ParseAnyReference(image)
	if err != nil {
		return "", err
	}
	namedRef, ok := ref.(reference.Named)
	if !ok {
		if _, ok := ref.(reference.Digested); ok {
			return image, nil
		}
		return "", errors.Errorf("unknown image reference format: %s", image)
	}
	// only query the registry if the reference is not already canonical
	// (i.e. does not contain a digest)
	if _, ok := namedRef.(reference.Canonical); !ok {
		namedRef = reference.TagNameOnly(namedRef)
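		// TagNameOnly adds the default "latest" tag to an untagged
		// reference, e.g. "nginx" becomes "nginx:latest"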

		taggedRef, ok := namedRef.(reference.NamedTagged)
		if !ok {
			return "", errors.Errorf("image reference not tagged: %s", image)
		}

		repo, _, err := c.config.Backend.GetRepository(ctx, taggedRef, authConfig)
		if err != nil {
			return "", err
		}
		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
		if err != nil {
			return "", err
		}

		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
		if err != nil {
			return "", err
		}
		// return familiar form until interface updated to return type
		return reference.FamiliarString(namedDigestedRef), nil
	}
	// reference already contains a digest, so just return it
	return reference.FamiliarString(ref), nil
}

// digestWarning constructs a formatted warning string using the image
// name that could not be pinned by digest. The formatting is hardcoded,
// but could be made smarter in the future.
func digestWarning(image string) string {
	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
}