github.com/lazyboychen7/engine@v17.12.1-ce-rc2+incompatible/daemon/cluster/services.go (about)

     1  package cluster
     2  
     3  import (
     4  	"encoding/base64"
     5  	"encoding/json"
     6  	"fmt"
     7  	"io"
     8  	"os"
     9  	"strconv"
    10  	"strings"
    11  	"time"
    12  
    13  	"github.com/docker/distribution/reference"
    14  	apitypes "github.com/docker/docker/api/types"
    15  	"github.com/docker/docker/api/types/backend"
    16  	types "github.com/docker/docker/api/types/swarm"
    17  	timetypes "github.com/docker/docker/api/types/time"
    18  	"github.com/docker/docker/daemon/cluster/convert"
    19  	runconfigopts "github.com/docker/docker/runconfig/opts"
    20  	swarmapi "github.com/docker/swarmkit/api"
    21  	gogotypes "github.com/gogo/protobuf/types"
    22  	"github.com/pkg/errors"
    23  	"github.com/sirupsen/logrus"
    24  	"golang.org/x/net/context"
    25  )
    26  
// GetServices returns all services of a managed swarm cluster.
// Name, id, label and runtime filters are translated into a SwarmKit
// list request; the "mode" filter is applied daemon-side in the loop
// below because SwarmKit does not understand it.
func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	// We move the accepted filter check here as "mode" filter
	// is processed in the daemon, not in SwarmKit. So it might
	// be good to have the accepted-filter check in the same file as
	// the filter processing (in the for loop below).
	accepted := map[string]bool{
		"name":    true,
		"id":      true,
		"label":   true,
		"mode":    true,
		"runtime": true,
	}
	if err := options.Filters.Validate(accepted); err != nil {
		return nil, err
	}

	if len(options.Filters.Get("runtime")) == 0 {
		// Default to using the container runtime filter
		options.Filters.Add("runtime", string(types.RuntimeContainer))
	}

	filters := &swarmapi.ListServicesRequest_Filters{
		NamePrefixes: options.Filters.Get("name"),
		IDPrefixes:   options.Filters.Get("id"),
		Labels:       runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
		Runtimes:     options.Filters.Get("runtime"),
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := state.controlClient.ListServices(
		ctx,
		&swarmapi.ListServicesRequest{Filters: filters})
	if err != nil {
		return nil, err
	}

	services := make([]types.Service, 0, len(r.Services))

	for _, service := range r.Services {
		// Daemon-side "mode" filter: skip services whose mode does not
		// exactly match one of the requested values.
		if options.Filters.Contains("mode") {
			var mode string
			switch service.Spec.GetMode().(type) {
			case *swarmapi.ServiceSpec_Global:
				mode = "global"
			case *swarmapi.ServiceSpec_Replicated:
				mode = "replicated"
			}

			if !options.Filters.ExactMatch("mode", mode) {
				continue
			}
		}
		svcs, err := convert.ServiceFromGRPC(*service)
		if err != nil {
			return nil, err
		}
		services = append(services, svcs)
	}

	return services, nil
}
    99  
   100  // GetService returns a service based on an ID or name.
   101  func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) {
   102  	var service *swarmapi.Service
   103  	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   104  		s, err := getService(ctx, state.controlClient, input, insertDefaults)
   105  		if err != nil {
   106  			return err
   107  		}
   108  		service = s
   109  		return nil
   110  	}); err != nil {
   111  		return types.Service{}, err
   112  	}
   113  	svc, err := convert.ServiceFromGRPC(*service)
   114  	if err != nil {
   115  		return types.Service{}, err
   116  	}
   117  	return svc, nil
   118  }
   119  
// CreateService creates a new service in a managed swarm cluster.
// encodedAuth is an optional base64url-encoded JSON AuthConfig that is
// forwarded to the nodes for pulling and used to resolve the image digest;
// queryRegistry controls whether the image is pinned to a digest by
// contacting the registry (only needed for API versions < 1.30).
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) {
	var resp *apitypes.ServiceCreateResponse
	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		// Resolve network names referenced by the spec to network IDs.
		err := c.populateNetworkID(ctx, state.controlClient, &s)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(s)
		if err != nil {
			return convertError{err}
		}

		resp = &apitypes.ServiceCreateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		// handle other runtimes here
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(types.RuntimePlugin):
				// Plugin runtime is gated behind the experimental daemon flag.
				// NOTE(review): the error from SystemInfo is discarded here —
				// confirm it cannot fail in a way that matters.
				info, _ := c.config.Backend.SystemInfo()
				if !info.ExperimentalBuild {
					return fmt.Errorf("runtime type %q only supported in experimental", types.RuntimePlugin)
				}
				if s.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}

			default:
				return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		case *swarmapi.TaskSpec_Container:
			ctnr := serviceSpec.Task.GetContainer()
			if ctnr == nil {
				return errors.New("service does not use container tasks")
			}
			// Forward the registry auth to the nodes so they can pull the image.
			if encodedAuth != "" {
				ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			}

			// retrieve auth config from encoded auth; a malformed auth config
			// is only logged, not fatal — digest resolution below may still
			// succeed without credentials.
			authConfig := &apitypes.AuthConfig{}
			if encodedAuth != "" {
				authReader := strings.NewReader(encodedAuth)
				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
				if err := dec.Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future. Since integration tests only use the
			// latest API version, so this is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
				if err != nil {
					// Failure to pin is non-fatal: warn the caller and let the
					// nodes resolve the image independently.
					logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))

				} else if ctnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
					ctnr.Image = digestImage

				} else {
					logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)

				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause CreateService below to fail. Reusing
				// "ctx" could make it impossible to create a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		}
		return nil
	})

	return resp, err
}
   221  
// UpdateService updates existing service to match new properties.
// version must be the swarm object version the caller read (optimistic
// concurrency — SwarmKit rejects the update if the service changed since).
// flags selects where registry auth comes from and the rollback behavior;
// queryRegistry controls digest pinning (API versions < 1.30).
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) {
	var resp *apitypes.ServiceUpdateResponse

	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {

		// Resolve network names referenced by the spec to network IDs.
		err := c.populateNetworkID(ctx, state.controlClient, &spec)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
		if err != nil {
			return convertError{err}
		}

		// Look up the existing service: its ID is needed for the update
		// call and its spec(s) may supply registry auth below.
		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
		if err != nil {
			return err
		}

		resp = &apitypes.ServiceUpdateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(types.RuntimePlugin):
				if spec.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}
			}
		case *swarmapi.TaskSpec_Container:
			newCtnr := serviceSpec.Task.GetContainer()
			if newCtnr == nil {
				return errors.New("service does not use container tasks")
			}

			encodedAuth := flags.EncodedRegistryAuth
			if encodedAuth != "" {
				newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			} else {
				// this is needed because if the encodedAuth isn't being updated then we
				// shouldn't lose it, and continue to use the one that was already present
				var ctnr *swarmapi.ContainerSpec
				switch flags.RegistryAuthFrom {
				case apitypes.RegistryAuthFromSpec, "":
					ctnr = currentService.Spec.Task.GetContainer()
				case apitypes.RegistryAuthFromPreviousSpec:
					if currentService.PreviousSpec == nil {
						return errors.New("service does not have a previous spec")
					}
					ctnr = currentService.PreviousSpec.Task.GetContainer()
				default:
					return errors.New("unsupported registryAuthFrom value")
				}
				if ctnr == nil {
					return errors.New("service does not use container tasks")
				}
				newCtnr.PullOptions = ctnr.PullOptions
				// update encodedAuth so it can be used to pin image by digest
				if ctnr.PullOptions != nil {
					encodedAuth = ctnr.PullOptions.RegistryAuth
				}
			}

			// retrieve auth config from encoded auth; a malformed auth config
			// is only logged, not fatal.
			authConfig := &apitypes.AuthConfig{}
			if encodedAuth != "" {
				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future. Since integration tests only use the
			// latest API version, so this is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
				if err != nil {
					// Failure to pin is non-fatal: warn the caller and let the
					// nodes resolve the image independently.
					logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
				} else if newCtnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
					newCtnr.Image = digestImage
				} else {
					logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause UpdateService below to fail. Reusing
				// "ctx" could make it impossible to update a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}
		}

		// Translate the string rollback flag into the SwarmKit enum.
		var rollback swarmapi.UpdateServiceRequest_Rollback
		switch flags.Rollback {
		case "", "none":
			rollback = swarmapi.UpdateServiceRequest_NONE
		case "previous":
			rollback = swarmapi.UpdateServiceRequest_PREVIOUS
		default:
			return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
		}

		_, err = state.controlClient.UpdateService(
			ctx,
			&swarmapi.UpdateServiceRequest{
				ServiceID: currentService.ID,
				Spec:      &serviceSpec,
				ServiceVersion: &swarmapi.Version{
					Index: version,
				},
				Rollback: rollback,
			},
		)
		return err
	})
	return resp, err
}
   349  
   350  // RemoveService removes a service from a managed swarm cluster.
   351  func (c *Cluster) RemoveService(input string) error {
   352  	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
   353  		service, err := getService(ctx, state.controlClient, input, false)
   354  		if err != nil {
   355  			return err
   356  		}
   357  
   358  		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
   359  		return err
   360  	})
   361  }
   362  
// ServiceLogs collects service logs and writes them back to `config.OutStream`
// It subscribes to the SwarmKit log stream for the services/tasks in
// selector and returns a channel of log messages. The channel is closed by
// the reader goroutine when the stream ends or ctx is canceled; stream
// errors are delivered as a final message with Err set.
func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	// Resolve raw service/task names in the selector to IDs.
	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
	if err != nil {
		return nil, errors.Wrap(err, "error making log selector")
	}

	// set the streams we'll use
	stdStreams := []swarmapi.LogStream{}
	if config.ShowStdout {
		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
	}
	if config.ShowStderr {
		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
	}

	// Get tail value squared away - the number of previous log lines we look at
	var tail int64
	// in ContainerLogs, if the tail value is ANYTHING non-integer, we just set
	// it to -1 (all). i don't agree with that, but i also think no tail value
	// should be legitimate. if you don't pass tail, we assume you want "all"
	if config.Tail == "all" || config.Tail == "" {
		// tail of 0 means send all logs on the swarmkit side
		tail = 0
	} else {
		t, err := strconv.Atoi(config.Tail)
		if err != nil {
			return nil, errors.New("tail value must be a positive integer or \"all\"")
		}
		if t < 0 {
			return nil, errors.New("negative tail values not supported")
		}
		// we actually use negative tail in swarmkit to represent messages
		// backwards starting from the beginning. also, -1 means no logs. so,
		// basically, for api compat with docker container logs, add one and
		// flip the sign. we error above if you try to negative tail, which
		// isn't supported by docker (and would error deeper in the stack
		// anyway)
		//
		// See the logs protobuf for more information
		tail = int64(-(t + 1))
	}

	// get the since value - the time in the past we're looking at logs starting from
	var sinceProto *gogotypes.Timestamp
	if config.Since != "" {
		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse since timestamp")
		}
		since := time.Unix(s, n)
		sinceProto, err = gogotypes.TimestampProto(since)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse timestamp to proto")
		}
	}

	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: swarmSelector,
		Options: &swarmapi.LogSubscriptionOptions{
			Follow:  config.Follow,
			Streams: stdStreams,
			Tail:    tail,
			Since:   sinceProto,
		},
	})
	if err != nil {
		return nil, err
	}

	// Pump messages from the gRPC stream into the channel. The goroutine
	// exits (and closes messageChan) on EOF, stream error, or ctx
	// cancellation, so it cannot outlive the caller's context.
	messageChan := make(chan *backend.LogMessage, 1)
	go func() {
		defer close(messageChan)
		for {
			// Check the context before doing anything.
			select {
			case <-ctx.Done():
				return
			default:
			}
			subscribeMsg, err := stream.Recv()
			if err == io.EOF {
				return
			}
			// if we're not io.EOF, push the message in and return
			if err != nil {
				select {
				case <-ctx.Done():
				case messageChan <- &backend.LogMessage{Err: err}:
				}
				return
			}

			for _, msg := range subscribeMsg.Messages {
				// make a new message
				m := new(backend.LogMessage)
				m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
				// add the timestamp, adding the error if it fails
				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
				if err != nil {
					m.Err = err
				}

				nodeKey := contextPrefix + ".node.id"
				serviceKey := contextPrefix + ".service.id"
				taskKey := contextPrefix + ".task.id"

				// copy over all of the details
				for _, d := range msg.Attrs {
					switch d.Key {
					case nodeKey, serviceKey, taskKey:
						// we have the final say over context details (in case there
						// is a conflict (if the user added a detail with a context's
						// key for some reason))
					default:
						m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
					}
				}
				// Append the authoritative context attributes last.
				m.Attrs = append(m.Attrs,
					backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
					backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
					backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
				)

				switch msg.Stream {
				case swarmapi.LogStreamStdout:
					m.Source = "stdout"
				case swarmapi.LogStreamStderr:
					m.Source = "stderr"
				}
				m.Line = msg.Data

				// there could be a case where the reader stops accepting
				// messages and the context is canceled. we need to check that
				// here, or otherwise we risk blocking forever on the message
				// send.
				select {
				case <-ctx.Done():
					return
				case messageChan <- m:
				}
			}
		}
	}()
	return messageChan, nil
}
   517  
   518  // convertSelector takes a backend.LogSelector, which contains raw names that
   519  // may or may not be valid, and converts them to an api.LogSelector proto. It
   520  // returns an error if something fails
   521  func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
   522  	// don't rely on swarmkit to resolve IDs, do it ourselves
   523  	swarmSelector := &swarmapi.LogSelector{}
   524  	for _, s := range selector.Services {
   525  		service, err := getService(ctx, cc, s, false)
   526  		if err != nil {
   527  			return nil, err
   528  		}
   529  		c := service.Spec.Task.GetContainer()
   530  		if c == nil {
   531  			return nil, errors.New("logs only supported on container tasks")
   532  		}
   533  		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
   534  	}
   535  	for _, t := range selector.Tasks {
   536  		task, err := getTask(ctx, cc, t)
   537  		if err != nil {
   538  			return nil, err
   539  		}
   540  		c := task.Spec.GetContainer()
   541  		if c == nil {
   542  			return nil, errors.New("logs only supported on container tasks")
   543  		}
   544  		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
   545  	}
   546  	return swarmSelector, nil
   547  }
   548  
   549  // imageWithDigestString takes an image such as name or name:tag
   550  // and returns the image pinned to a digest, such as name@sha256:34234
   551  func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
   552  	ref, err := reference.ParseAnyReference(image)
   553  	if err != nil {
   554  		return "", err
   555  	}
   556  	namedRef, ok := ref.(reference.Named)
   557  	if !ok {
   558  		if _, ok := ref.(reference.Digested); ok {
   559  			return image, nil
   560  		}
   561  		return "", errors.Errorf("unknown image reference format: %s", image)
   562  	}
   563  	// only query registry if not a canonical reference (i.e. with digest)
   564  	if _, ok := namedRef.(reference.Canonical); !ok {
   565  		namedRef = reference.TagNameOnly(namedRef)
   566  
   567  		taggedRef, ok := namedRef.(reference.NamedTagged)
   568  		if !ok {
   569  			return "", errors.Errorf("image reference not tagged: %s", image)
   570  		}
   571  
   572  		repo, _, err := c.config.Backend.GetRepository(ctx, taggedRef, authConfig)
   573  		if err != nil {
   574  			return "", err
   575  		}
   576  		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
   577  		if err != nil {
   578  			return "", err
   579  		}
   580  
   581  		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
   582  		if err != nil {
   583  			return "", err
   584  		}
   585  		// return familiar form until interface updated to return type
   586  		return reference.FamiliarString(namedDigestedRef), nil
   587  	}
   588  	// reference already contains a digest, so just return it
   589  	return reference.FamiliarString(ref), nil
   590  }
   591  
   592  // digestWarning constructs a formatted warning string
   593  // using the image name that could not be pinned by digest. The
   594  // formatting is hardcoded, but could me made smarter in the future
   595  func digestWarning(image string) string {
   596  	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
   597  }