github.com/adityamillind98/moby@v23.0.0-rc.4+incompatible/daemon/cluster/services.go

package cluster // import "github.com/docker/docker/daemon/cluster"

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/docker/distribution/reference"
	apitypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	types "github.com/docker/docker/api/types/swarm"
	timetypes "github.com/docker/docker/api/types/time"
	"github.com/docker/docker/daemon/cluster/convert"
	"github.com/docker/docker/errdefs"
	runconfigopts "github.com/docker/docker/runconfig/opts"
	gogotypes "github.com/gogo/protobuf/types"
	swarmapi "github.com/moby/swarmkit/v2/api"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
)

// GetServices returns all services of a managed swarm cluster.
func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	// We move the accepted filter check here, as the "mode" filter
	// is processed in the daemon, not in SwarmKit. So it is good to
	// have the accepted filter check in the same file as the filter
	// processing (in the for loop below).
	accepted := map[string]bool{
		"name":    true,
		"id":      true,
		"label":   true,
		"mode":    true,
		"runtime": true,
	}
	if err := options.Filters.Validate(accepted); err != nil {
		return nil, err
	}

	if len(options.Filters.Get("runtime")) == 0 {
		// Default to using the container runtime filter
		options.Filters.Add("runtime", string(types.RuntimeContainer))
	}

	filters := &swarmapi.ListServicesRequest_Filters{
		NamePrefixes: options.Filters.Get("name"),
		IDPrefixes:   options.Filters.Get("id"),
		Labels:       runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
		Runtimes:     options.Filters.Get("runtime"),
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := state.controlClient.ListServices(
		ctx,
		&swarmapi.ListServicesRequest{Filters: filters},
		grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
	)
	if err != nil {
		return nil, err
	}

	services := make([]types.Service, 0, len(r.Services))

	// if the user requests the service statuses, we'll store the IDs needed
	// in this slice
	var serviceIDs []string
	if options.Status {
		serviceIDs = make([]string, 0, len(r.Services))
	}
	for _, service := range r.Services {
		if options.Filters.Contains("mode") {
			var mode string
			switch service.Spec.GetMode().(type) {
			case *swarmapi.ServiceSpec_Global:
				mode = "global"
			case *swarmapi.ServiceSpec_Replicated:
				mode = "replicated"
			case *swarmapi.ServiceSpec_ReplicatedJob:
				mode = "replicated-job"
			case *swarmapi.ServiceSpec_GlobalJob:
				mode = "global-job"
			}

			if !options.Filters.ExactMatch("mode", mode) {
				continue
			}
		}
		if options.Status {
			serviceIDs = append(serviceIDs, service.ID)
		}
		svcs, err := convert.ServiceFromGRPC(*service)
		if err != nil {
			return nil, err
		}
		services = append(services, svcs)
	}

	if options.Status {
		// Listing service statuses is a separate call because, while it is
		// the most common UI operation, it is still just a UI operation, and
		// it would be improper to include this data in swarm's Service
		// object. We pay the cost with some complexity here, but this is
		// still way more efficient than marshalling and unmarshalling all
		// the JSON needed to list tasks and otherwise compute this data
		// client-side.
		resp, err := state.controlClient.ListServiceStatuses(
			ctx,
			&swarmapi.ListServiceStatusesRequest{Services: serviceIDs},
			grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
		)
		if err != nil {
			return nil, err
		}

		// we'll need to match up statuses in the response with the services
		// in the list operation. if we did this by operating on two lists,
		// the result would be quadratic. instead, make a mapping of service
		// IDs to service statuses so that this is roughly linear.
		// additionally, convert the status response to an engine api service
		// status here.
		serviceMap := map[string]*types.ServiceStatus{}
		for _, status := range resp.Statuses {
			serviceMap[status.ServiceID] = &types.ServiceStatus{
				RunningTasks:   status.RunningTasks,
				DesiredTasks:   status.DesiredTasks,
				CompletedTasks: status.CompletedTasks,
			}
		}

		// because this is a list of values and not pointers, make sure we
		// actually alter the value when iterating.
		for i, service := range services {
			// the return value of the ListServiceStatuses operation is
			// guaranteed to contain a value in the response for every
			// argument in the request, so we can safely do this assignment.
			// and even if it wasn't, and the service ID was for some reason
			// absent from this map, the resulting value of service.Status
			// would just be nil -- the same thing it was before
			service.ServiceStatus = serviceMap[service.ID]
			services[i] = service
		}
	}

	return services, nil
}
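
// Illustrative sketch (not part of the original file): calling GetServices
// with the "mode" filter and the Status option, assuming a *Cluster value
// "c" wired into a running daemon. "filters" here is
// github.com/docker/docker/api/types/filters; the filter keys used are the
// accepted set validated above.
//
//	opts := apitypes.ServiceListOptions{Filters: filters.NewArgs(), Status: true}
//	opts.Filters.Add("mode", "replicated")
//	services, err := c.GetServices(opts)
//	if err == nil {
//		for _, s := range services {
//			// ServiceStatus is populated because Status was requested
//			fmt.Printf("%s: %d/%d tasks running\n",
//				s.Spec.Name, s.ServiceStatus.RunningTasks, s.ServiceStatus.DesiredTasks)
//		}
//	}
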
// GetService returns a service based on an ID or name.
func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) {
	var service *swarmapi.Service
	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		s, err := getService(ctx, state.controlClient, input, insertDefaults)
		if err != nil {
			return err
		}
		service = s
		return nil
	}); err != nil {
		return types.Service{}, err
	}
	svc, err := convert.ServiceFromGRPC(*service)
	if err != nil {
		return types.Service{}, err
	}
	return svc, nil
}
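
// Illustrative sketch (not part of the original file): the encodedAuth
// parameter consumed by CreateService below is a base64url-encoded JSON
// serialization of apitypes.AuthConfig, i.e. the inverse of the decode
// CreateService performs. The credentials are hypothetical:
//
//	buf, _ := json.Marshal(apitypes.AuthConfig{
//		Username: "user",
//		Password: "secret",
//	})
//	encodedAuth := base64.URLEncoding.EncodeToString(buf)
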
// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) {
	var resp *apitypes.ServiceCreateResponse
	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		err := c.populateNetworkID(ctx, state.controlClient, &s)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(s)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		resp = &apitypes.ServiceCreateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Attachment:
			return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment)
		// handle other runtimes here
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(types.RuntimePlugin):
				if !c.config.Backend.HasExperimental() {
					return fmt.Errorf("runtime type %q only supported in experimental", types.RuntimePlugin)
				}
				if s.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}

			default:
				return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		case *swarmapi.TaskSpec_Container:
			ctnr := serviceSpec.Task.GetContainer()
			if ctnr == nil {
				return errors.New("service does not use container tasks")
			}
			if encodedAuth != "" {
				ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			}

			// retrieve auth config from encoded auth
			authConfig := &apitypes.AuthConfig{}
			if encodedAuth != "" {
				authReader := strings.NewReader(encodedAuth)
				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
				if err := dec.Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future; since integration tests only use the
			// latest API version, it is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
				if err != nil {
					logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
				} else if ctnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
					ctnr.Image = digestImage
				} else {
					logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause CreateService below to fail. Reusing
				// "ctx" could make it impossible to create a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}

			r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
			if err != nil {
				return err
			}

			resp.ID = r.Service.ID
		}
		return nil
	})

	return resp, err
}
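
// Illustrative sketch (not part of the original file): creating a replicated
// service via CreateService above, assuming "c" is an initialized *Cluster.
// The service name and image are hypothetical; queryRegistry=true asks the
// daemon to pin the image to a digest before submitting the spec.
//
//	replicas := uint64(2)
//	spec := types.ServiceSpec{
//		Annotations: types.Annotations{Name: "web"},
//		TaskTemplate: types.TaskSpec{
//			ContainerSpec: &types.ContainerSpec{Image: "nginx:alpine"},
//		},
//		Mode: types.ServiceMode{Replicated: &types.ReplicatedService{Replicas: &replicas}},
//	}
//	resp, err := c.CreateService(spec, "", true)
//	if err == nil {
//		fmt.Println("created service", resp.ID)
//	}
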
// UpdateService updates existing service to match new properties.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) {
	var resp *apitypes.ServiceUpdateResponse

	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		err := c.populateNetworkID(ctx, state.controlClient, &spec)
		if err != nil {
			return err
		}

		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
		if err != nil {
			return err
		}

		resp = &apitypes.ServiceUpdateResponse{}

		switch serviceSpec.Task.Runtime.(type) {
		case *swarmapi.TaskSpec_Attachment:
			return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment)
		case *swarmapi.TaskSpec_Generic:
			switch serviceSpec.Task.GetGeneric().Kind {
			case string(types.RuntimePlugin):
				if spec.TaskTemplate.PluginSpec == nil {
					return errors.New("plugin spec must be set")
				}
			}
		case *swarmapi.TaskSpec_Container:
			newCtnr := serviceSpec.Task.GetContainer()
			if newCtnr == nil {
				return errors.New("service does not use container tasks")
			}

			encodedAuth := flags.EncodedRegistryAuth
			if encodedAuth != "" {
				newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
			} else {
				// this is needed because if the encodedAuth isn't being updated then we
				// shouldn't lose it, and continue to use the one that was already present
				var ctnr *swarmapi.ContainerSpec
				switch flags.RegistryAuthFrom {
				case apitypes.RegistryAuthFromSpec, "":
					ctnr = currentService.Spec.Task.GetContainer()
				case apitypes.RegistryAuthFromPreviousSpec:
					if currentService.PreviousSpec == nil {
						return errors.New("service does not have a previous spec")
					}
					ctnr = currentService.PreviousSpec.Task.GetContainer()
				default:
					return errors.New("unsupported registryAuthFrom value")
				}
				if ctnr == nil {
					return errors.New("service does not use container tasks")
				}
				newCtnr.PullOptions = ctnr.PullOptions
				// update encodedAuth so it can be used to pin image by digest
				if ctnr.PullOptions != nil {
					encodedAuth = ctnr.PullOptions.RegistryAuth
				}
			}

			// retrieve auth config from encoded auth
			authConfig := &apitypes.AuthConfig{}
			if encodedAuth != "" {
				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
					logrus.Warnf("invalid authconfig: %v", err)
				}
			}

			// pin image by digest for API versions < 1.30
			// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future; since integration tests only use the
			// latest API version, it is no longer required.
			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
				if err != nil {
					logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
					// warning in the client response should be concise
					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
				} else if newCtnr.Image != digestImage {
					logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
					newCtnr.Image = digestImage
				} else {
					logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
				}

				// Replace the context with a fresh one.
				// If we timed out while communicating with the
				// registry, then "ctx" will already be expired, which
				// would cause UpdateService below to fail. Reusing
				// "ctx" could make it impossible to update a service
				// if the registry is slow or unresponsive.
				var cancel func()
				ctx, cancel = c.getRequestContext()
				defer cancel()
			}
		}

		var rollback swarmapi.UpdateServiceRequest_Rollback
		switch flags.Rollback {
		case "", "none":
			rollback = swarmapi.UpdateServiceRequest_NONE
		case "previous":
			rollback = swarmapi.UpdateServiceRequest_PREVIOUS
		default:
			return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
		}

		_, err = state.controlClient.UpdateService(
			ctx,
			&swarmapi.UpdateServiceRequest{
				ServiceID: currentService.ID,
				Spec:      &serviceSpec,
				ServiceVersion: &swarmapi.Version{
					Index: version,
				},
				Rollback: rollback,
			},
		)
		return err
	})
	return resp, err
}
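
// Illustrative sketch (not part of the original file): UpdateService above is
// guarded by optimistic concurrency, so callers pass the version index read
// from the current object; a stale index makes SwarmKit reject the update.
// The service name and label are hypothetical:
//
//	svc, err := c.GetService("web", false)
//	if err == nil {
//		svc.Spec.Labels = map[string]string{"tier": "frontend"}
//		_, err = c.UpdateService(svc.ID, svc.Version.Index,
//			svc.Spec, apitypes.ServiceUpdateOptions{}, false)
//	}
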
// RemoveService removes a service from a managed swarm cluster.
func (c *Cluster) RemoveService(input string) error {
	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
		service, err := getService(ctx, state.controlClient, input, false)
		if err != nil {
			return err
		}

		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
		return err
	})
}
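
// Illustrative sketch (not part of the original file): draining the channel
// returned by ServiceLogs below, with a hypothetical selector. The channel is
// closed by the producer goroutine when the stream ends or the context is
// cancelled, so ranging over it terminates cleanly.
//
//	msgs, err := c.ServiceLogs(ctx,
//		&backend.LogSelector{Services: []string{"web"}},
//		&apitypes.ContainerLogsOptions{ShowStdout: true, Tail: "all"})
//	if err == nil {
//		for m := range msgs {
//			if m.Err != nil {
//				break
//			}
//			os.Stdout.Write(m.Line)
//		}
//	}
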
// ServiceLogs collects service logs and returns them on the returned
// message channel.
func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return nil, c.errNoManager(state)
	}

	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
	if err != nil {
		return nil, errors.Wrap(err, "error making log selector")
	}

	// set the streams we'll use
	stdStreams := []swarmapi.LogStream{}
	if config.ShowStdout {
		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
	}
	if config.ShowStderr {
		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
	}

	// Get tail value squared away - the number of previous log lines we look at
	var tail int64
	// in ContainerLogs, if the tail value is ANYTHING non-integer, we just set
	// it to -1 (all). i don't agree with that, but i also think no tail value
	// should be legitimate. if you don't pass tail, we assume you want "all"
	if config.Tail == "all" || config.Tail == "" {
		// tail of 0 means send all logs on the swarmkit side
		tail = 0
	} else {
		t, err := strconv.Atoi(config.Tail)
		if err != nil {
			return nil, errors.New("tail value must be a positive integer or \"all\"")
		}
		if t < 0 {
			return nil, errors.New("negative tail values not supported")
		}
		// we actually use negative tail in swarmkit to represent messages
		// backwards starting from the beginning. also, -1 means no logs. so,
		// basically, for api compat with docker container logs, add one and
		// flip the sign. we error above if you try to negative tail, which
		// isn't supported by docker (and would error deeper in the stack
		// anyway)
		//
		// See the logs protobuf for more information
		tail = int64(-(t + 1))
	}

	// get the since value - the time in the past we're looking at logs starting from
	var sinceProto *gogotypes.Timestamp
	if config.Since != "" {
		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse since timestamp")
		}
		since := time.Unix(s, n)
		sinceProto, err = gogotypes.TimestampProto(since)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse timestamp to proto")
		}
	}

	stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: swarmSelector,
		Options: &swarmapi.LogSubscriptionOptions{
			Follow:  config.Follow,
			Streams: stdStreams,
			Tail:    tail,
			Since:   sinceProto,
		},
	})
	if err != nil {
		return nil, err
	}

	messageChan := make(chan *backend.LogMessage, 1)
	go func() {
		defer close(messageChan)
		for {
			// Check the context before doing anything.
			select {
			case <-ctx.Done():
				return
			default:
			}
			subscribeMsg, err := stream.Recv()
			if err == io.EOF {
				return
			}
			// if the error is not io.EOF, push an error message in and return
			if err != nil {
				select {
				case <-ctx.Done():
				case messageChan <- &backend.LogMessage{Err: err}:
				}
				return
			}

			for _, msg := range subscribeMsg.Messages {
				// make a new message
				m := new(backend.LogMessage)
				m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
				// add the timestamp, adding the error if it fails
				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
				if err != nil {
					m.Err = err
				}

				nodeKey := contextPrefix + ".node.id"
				serviceKey := contextPrefix + ".service.id"
				taskKey := contextPrefix + ".task.id"

				// copy over all of the details
				for _, d := range msg.Attrs {
					switch d.Key {
					case nodeKey, serviceKey, taskKey:
						// we have the final say over context details (in case there
						// is a conflict (if the user added a detail with a context's
						// key for some reason))
					default:
						m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
					}
				}
				m.Attrs = append(m.Attrs,
					backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
					backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
					backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
				)

				switch msg.Stream {
				case swarmapi.LogStreamStdout:
					m.Source = "stdout"
				case swarmapi.LogStreamStderr:
					m.Source = "stderr"
				}
				m.Line = msg.Data

				// there could be a case where the reader stops accepting
				// messages and the context is canceled. we need to check that
				// here, or otherwise we risk blocking forever on the message
				// send.
				select {
				case <-ctx.Done():
					return
				case messageChan <- m:
				}
			}
		}
	}()
	return messageChan, nil
}
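
// Worked example (not part of the original file) of the tail mapping used in
// ServiceLogs above. Docker's tail semantics are translated to SwarmKit's by
// adding one and flipping the sign, because on the SwarmKit side 0 means
// "all" and -1 means "no logs":
//
//	Tail: "all" or ""  ->  tail = 0             (send everything)
//	Tail: "0"          ->  tail = -(0+1)  = -1  (send nothing)
//	Tail: "10"         ->  tail = -(10+1) = -11 (last 10 lines)
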
// convertSelector takes a backend.LogSelector, which contains raw names that
// may or may not be valid, and converts them to an api.LogSelector proto. It
// returns an error if something fails
func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
	// don't rely on swarmkit to resolve IDs, do it ourselves
	swarmSelector := &swarmapi.LogSelector{}
	for _, s := range selector.Services {
		service, err := getService(ctx, cc, s, false)
		if err != nil {
			return nil, err
		}
		c := service.Spec.Task.GetContainer()
		if c == nil {
			return nil, errors.New("logs only supported on container tasks")
		}
		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
	}
	for _, t := range selector.Tasks {
		task, err := getTask(ctx, cc, t)
		if err != nil {
			return nil, err
		}
		c := task.Spec.GetContainer()
		if c == nil {
			return nil, errors.New("logs only supported on container tasks")
		}
		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
	}
	return swarmSelector, nil
}
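
// Illustrative sketch (not part of the original file): the reference handling
// performed by imageWithDigestString below, using the docker/distribution
// reference package imported above. A name without a tag is normalized with
// the default "latest" tag before the registry lookup:
//
//	ref, _ := reference.ParseAnyReference("nginx")
//	named := ref.(reference.Named)
//	tagged := reference.TagNameOnly(named).(reference.NamedTagged)
//	fmt.Println(tagged.Tag()) // "latest"
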
// imageWithDigestString takes an image such as name or name:tag
// and returns the image pinned to a digest, such as name@sha256:34234
func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
	ref, err := reference.ParseAnyReference(image)
	if err != nil {
		return "", err
	}
	namedRef, ok := ref.(reference.Named)
	if !ok {
		if _, ok := ref.(reference.Digested); ok {
			return image, nil
		}
		return "", errors.Errorf("unknown image reference format: %s", image)
	}
	// only query registry if not a canonical reference (i.e. with digest)
	if _, ok := namedRef.(reference.Canonical); !ok {
		namedRef = reference.TagNameOnly(namedRef)

		taggedRef, ok := namedRef.(reference.NamedTagged)
		if !ok {
			return "", errors.Errorf("image reference not tagged: %s", image)
		}

		repo, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig)
		if err != nil {
			return "", err
		}
		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
		if err != nil {
			return "", err
		}

		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
		if err != nil {
			return "", err
		}
		// return familiar form until interface updated to return type
		return reference.FamiliarString(namedDigestedRef), nil
	}
	// reference already contains a digest, so just return it
	return reference.FamiliarString(ref), nil
}

// digestWarning constructs a formatted warning string
// using the image name that could not be pinned by digest. The
// formatting is hardcoded, but could be made smarter in the future
func digestWarning(image string) string {
	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
}
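
// Illustrative sketch (not part of the original file): the pinned form
// returned by imageWithDigestString is the familiar name plus a digest; the
// digest shown is a placeholder, not a real value.
//
//	pinned, err := c.imageWithDigestString(ctx, "nginx:alpine", &apitypes.AuthConfig{})
//	// on success, pinned looks like "nginx:alpine@sha256:<digest>"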