github.com/tonistiigi/docker@v0.10.1-0.20240229224939-974013b0dc6a/distribution/pull_v2.go

package distribution // import "github.com/docker/docker/distribution"

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"runtime"
	"strings"
	"time"

	"github.com/containerd/containerd/platforms"
	"github.com/containerd/log"
	"github.com/distribution/reference"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/ocischema"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	v1 "github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	archvariant "github.com/tonistiigi/go-archvariant"
)

var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// imageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type imageConfigPullError struct {
	Err error
}

// Error returns the error string for imageConfigPullError.
func (e imageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

// newPuller returns a puller to pull from a v2 registry.
func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, config *ImagePullConfig, local ContentStore) *puller {
	return &puller{
		metadataService: metadata.NewV2MetadataService(config.MetadataStore),
		endpoint:        endpoint,
		config:          config,
		repoInfo:        repoInfo,
		manifestStore: &manifestStore{
			local: local,
		},
	}
}

type puller struct {
	metadataService metadata.V2MetadataService
	endpoint        registry.APIEndpoint
	config          *ImagePullConfig
	repoInfo        *registry.RepositoryInfo
	repo            distribution.Repository
	manifestStore   *manifestStore
}

func (p *puller) pull(ctx context.Context, ref reference.Named) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, err = newRepository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		log.G(ctx).Warnf("Error getting v2 registry: %v", err)
		return err
	}

	p.manifestStore.remote, err = p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	return p.pullRepository(ctx, ref)
}

func (p *puller) pullRepository(ctx context.Context, ref reference.Named) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullTag(ctx, ref, p.config.Platform)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			return err
		}

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullTag(ctx, tagRef, p.config.Platform)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	p.writeStatus(reference.FamiliarString(ref), layersDownloaded)

	return nil
}
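// Illustrative sketch (not part of the original source): how a name-only
// reference expands in the pull-all-tags branch of pullRepository above. The
// tag "v1.2" stands in for a hypothetical value returned by the tags service:
//
//	ref, _ := reference.ParseNormalizedNamed("busybox")
//	reference.IsNameOnly(ref) // true: neither tag nor digest
//	tagRef, _ := reference.WithTag(ref, "v1.2")
//	reference.FamiliarString(tagRef) // "busybox:v1.2"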
// writeStatus writes a status message to out. If layersDownloaded is true, the
// status message indicates that a newer image was downloaded. Otherwise, it
// indicates that the image is up to date. requestedTag is the tag the message
// will refer to.
func (p *puller) writeStatus(requestedTag string, layersDownloaded bool) {
	if layersDownloaded {
		progress.Message(p.config.ProgressOutput, "", "Status: Downloaded newer image for "+requestedTag)
	} else {
		progress.Message(p.config.ProgressOutput, "", "Status: Image is up to date for "+requestedTag)
	}
}

type layerDescriptor struct {
	digest          digest.Digest
	diffID          layer.DiffID
	repoInfo        *registry.RepositoryInfo
	repo            distribution.Repository
	metadataService metadata.V2MetadataService
	tmpFile         *os.File
	verifier        digest.Verifier
	src             distribution.Descriptor
}

func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *layerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	if ld.diffID != "" {
		return ld.diffID, nil
	}
	return ld.metadataService.GetDiffID(ld.digest)
}

func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	log.G(ctx).Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, io.SeekEnd)
		if err != nil {
			log.G(ctx).Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				log.G(ctx).Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			log.G(ctx).Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		log.G(ctx).Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, io.SeekEnd)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			log.G(ctx).Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		log.G(ctx).Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	log.G(ctx).Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, io.SeekStart)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			log.G(ctx).Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			log.G(ctx).Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}

func (ld *layerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			log.G(context.TODO()).Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

func (ld *layerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil {
		log.G(context.TODO()).Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		log.G(context.TODO()).Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	_ = ld.metadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}
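// Illustrative sketch (not part of the original source): the verify-while-copy
// pattern that Download uses above. The blob is streamed through io.TeeReader
// so the digest.Verifier hashes exactly the bytes written to the temp file;
// src and dst are hypothetical placeholders:
//
//	verifier := ld.digest.Verifier()
//	if _, err := io.Copy(dst, io.TeeReader(src, verifier)); err != nil {
//		return err
//	}
//	if !verifier.Verified() {
//		return fmt.Errorf("digest mismatch for %s", ld.digest)
//	}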
func (p *puller) pullTag(ctx context.Context, ref reference.Named, platform *ocispec.Platform) (tagUpdated bool, err error) {
	var (
		tagOrDigest string // Used for logging/progress only
		dgst        digest.Digest
		mt          string
		size        int64
		tagged      reference.NamedTagged
		isTagged    bool
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		dgst = digested.Digest()
		tagOrDigest = digested.String()
	} else if tagged, isTagged = ref.(reference.NamedTagged); isTagged {
		tagService := p.repo.Tags(ctx)
		desc, err := tagService.Get(ctx, tagged.Tag())
		if err != nil {
			return false, err
		}

		dgst = desc.Digest
		tagOrDigest = tagged.Tag()
		mt = desc.MediaType
		size = desc.Size
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{
		"digest": dgst,
		"remote": ref,
	}))

	desc := ocispec.Descriptor{
		MediaType: mt,
		Digest:    dgst,
		Size:      size,
	}

	manifest, err := p.manifestStore.Get(ctx, desc, ref)
	if err != nil {
		if isTagged && isNotFound(errors.Cause(err)) {
			log.G(ctx).WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag")

			msg := `%s Failed to pull manifest by the resolved digest. This registry does not
appear to conform to the distribution registry specification; falling back to
pull by tag. This fallback is DEPRECATED, and will be removed in a future
release. Please contact admins of %s. %s
`

			warnEmoji := "\U000026A0\U0000FE0F"
			progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji)

			// Fetch by tag worked, but fetch by digest didn't.
			// This is a broken registry implementation.
			// We'll fallback to the old behavior and get the manifest by tag.
			var ms distribution.ManifestService
			ms, err = p.repo.Manifests(ctx)
			if err != nil {
				return false, err
			}

			manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag()))
			err = errors.Wrap(err, "error after falling back to get manifest by tag")
		}
		if err != nil {
			return false, err
		}
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		if err := p.validateMediaType(m.Manifest.Config.MediaType); err != nil {
			return false, err
		}
	}

	log.G(ctx).Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		err := DeprecatedSchema1ImageError(ref)
		log.G(ctx).Warn(err.Error())
		if os.Getenv("DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE") == "" {
			return false, err
		}
		progress.Message(p.config.ProgressOutput, "", err.Error())

		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *ocischema.DeserializedManifest:
		id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
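// Illustrative sketch (not part of the original source): the tag-to-digest
// resolution that pullTag performs above before fetching by descriptor. The
// tag "latest" is a hypothetical input:
//
//	desc, err := p.repo.Tags(ctx).Get(ctx, "latest")
//	// desc.Digest, desc.MediaType, and desc.Size seed the ocispec.Descriptor
//	// passed to p.manifestStore.Get, which fetches and verifies the manifest.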
// validateMediaType validates if the given mediaType is accepted by the puller's
// configuration.
func (p *puller) validateMediaType(mediaType string) error {
	var allowedMediaTypes []string
	if len(p.config.Schema2Types) > 0 {
		allowedMediaTypes = p.config.Schema2Types
	} else {
		allowedMediaTypes = defaultImageTypes
	}
	for _, t := range allowedMediaTypes {
		if mediaType == t {
			return nil
		}
	}

	configClass := mediaTypeClasses[mediaType]
	if configClass == "" {
		configClass = "unknown"
	}
	return invalidManifestClassError{mediaType, configClass}
}
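// Illustrative sketch (not part of the original source): validateMediaType is
// an exact-match allow-list, assuming defaultImageTypes (defined elsewhere in
// this package) contains the schema2 config media type:
//
//	p.validateMediaType(schema2.MediaTypeImageConfig) // nil
//	p.validateMediaType("application/x-unknown")      // invalidManifestClassError with class "unknown"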
func (p *puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	if platform != nil {
		// Early bath if the requested OS doesn't match that of the configuration.
		// This avoids doing the download, only to potentially fail later.
		if err := image.CheckOS(platform.OS); err != nil {
			return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", runtime.GOOS, platform.OS)
		}
	}

	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		if err = blobSum.Validate(); err != nil {
			return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum)
		}

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &layerDescriptor{
			digest:          blobSum,
			repoInfo:        p.repoInfo,
			repo:            p.repo,
			metadataService: p.metadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(ctx, config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}

func checkSupportedMediaType(mediaType string) error {
	lowerMt := strings.ToLower(mediaType)
	for _, mt := range supportedMediaTypes {
		// The media type should either be an exact match, or have a valid prefix;
		// we append a "." when matching prefixes to exclude "false positives",
		// for example, we don't want to match "application/vnd.oci.images_are_fun_yolo".
		if lowerMt == mt || strings.HasPrefix(lowerMt, mt+".") {
			return nil
		}
	}
	return unsupportedMediaTypeError{MediaType: mediaType}
}
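// Illustrative sketch (not part of the original source): the prefix matching
// in checkSupportedMediaType, assuming supportedMediaTypes (defined elsewhere
// in this package) contains "application/vnd.oci.image":
//
//	checkSupportedMediaType("application/vnd.oci.image.layer.v1.tar")  // nil: prefix plus "."
//	checkSupportedMediaType("application/vnd.oci.images_are_fun_yolo") // unsupportedMediaTypeError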
func (p *puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *ocispec.Platform) (id digest.Digest, err error) {
	if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, nil
	}

	if err := checkSupportedMediaType(target.MediaType); err != nil {
		return "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range layers {
		if err := d.Digest.Validate(); err != nil {
			return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest)
		}
		if err := checkSupportedMediaType(d.MediaType); err != nil {
			return "", err
		}
		layerDescriptor := &layerDescriptor{
			digest:          d.Digest,
			repo:            p.repo,
			repoInfo:        p.repoInfo,
			metadataService: p.metadataService,
			src:             d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- imageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte            // raw serialized image config
		downloadedRootFS *image.RootFS     // rootFS from registered layers
		configRootFS     *image.RootFS     // rootFS from configuration
		release          func()            // release resources from rootFS download
		configPlatform   *ocispec.Platform // for LCOW when registering downloaded layers
	)

	layerStoreOS := runtime.GOOS
	if platform != nil {
		layerStoreOS = platform.OS
	}

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download, slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configPlatform, err = receiveConfig(configChan, configErrChan)
		if err != nil {
			return "", err
		}
		if configRootFS == nil {
			return "", errRootFSInvalid
		}
		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
			return "", err
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}
		if platform == nil {
			// Early bath if the requested OS doesn't match that of the configuration.
			// This avoids doing the download, only to potentially fail later.
			if err := image.CheckOS(configPlatform.OS); err != nil {
				return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
			}
			layerStoreOS = configPlatform.OS
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side loaded
		for i := range descriptors {
			descriptors[i].(*layerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	// Assume that the operating system is the host OS if blank, and validate it
	// to ensure we don't cause a panic by an invalid index into the layerstores.
	if layerStoreOS != "" {
		if err := image.CheckOS(layerStoreOS); err != nil {
			return "", err
		}
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		configJSON, configRootFS, _, err = receiveConfig(configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(ctx, configJSON)
	if err != nil {
		return "", err
	}

	return imageID, nil
}

func (p *puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}
	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
	return id, manifestDigest, err
}

func (p *puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}
	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
	return id, manifestDigest, err
}

func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *ocispec.Platform, error) {
	select {
	case configJSON := <-configChan:
		rootfs, err := rootFSFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		platform, err := platformFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		return configJSON, rootfs, platform, nil
	case err := <-errChan:
		return nil, nil, nil, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2Config.
	}
}
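// Illustrative sketch (not part of the original source): the fan-out shape
// used by pullSchema2Layers above, reduced to its essentials. The config is
// fetched concurrently with the layers; channels buffered to size 1 let the
// sender finish even if the receiver has already bailed out, and a config
// failure cancels the shared context to abort layer downloads. fetchConfig is
// a hypothetical stand-in for p.pullSchema2Config:
//
//	configChan := make(chan []byte, 1)
//	configErrChan := make(chan error, 1)
//	ctx, cancel := context.WithCancel(ctx)
//	defer cancel()
//	go func() {
//		if b, err := fetchConfig(ctx); err != nil {
//			configErrChan <- err
//			cancel() // config errors outrank layer errors
//		} else {
//			configChan <- b
//		}
//	}()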
// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *ocispec.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var platform ocispec.Platform
	if pp != nil {
		platform = *pp
	}
	log.G(ctx).Debugf("%s resolved to a manifestList object with %d entries; looking for a %s match", ref, len(mfstList.Manifests), platforms.Format(platform))

	manifestMatches := filterManifests(mfstList.Manifests, platform)

	for _, match := range manifestMatches {
		if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil {
			return "", "", err
		}

		desc := ocispec.Descriptor{
			Digest:    match.Digest,
			Size:      match.Size,
			MediaType: match.MediaType,
		}
		manifest, err := p.manifestStore.Get(ctx, desc, ref)
		if err != nil {
			return "", "", err
		}

		manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest)
		if err != nil {
			return "", "", err
		}

		switch v := manifest.(type) {
		case *schema1.SignedManifest:
			err := DeprecatedSchema1ImageError(ref)
			log.G(ctx).Warn(err.Error())
			if os.Getenv("DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE") == "" {
				return "", "", err
			}
			progress.Message(p.config.ProgressOutput, "", err.Error())

			platform := toOCIPlatform(match.Platform)
			id, _, err = p.pullSchema1(ctx, manifestRef, v, platform)
			if err != nil {
				return "", "", err
			}
		case *schema2.DeserializedManifest:
			platform := toOCIPlatform(match.Platform)
			id, _, err = p.pullSchema2(ctx, manifestRef, v, platform)
			if err != nil {
				return "", "", err
			}
		case *ocischema.DeserializedManifest:
			platform := toOCIPlatform(match.Platform)
			id, _, err = p.pullOCI(ctx, manifestRef, v, platform)
			if err != nil {
				return "", "", err
			}
		case *manifestlist.DeserializedManifestList:
			id, _, err = p.pullManifestList(ctx, manifestRef, v, pp)
			if err != nil {
				var noMatches noMatchesErr
				if !errors.As(err, &noMatches) {
					// test the next match
					continue
				}
			}
		default:
			// The OCI spec requires unknown manifest types to be skipped
			continue
		}
		return id, manifestListDigest, err
	}
	return "", "", noMatchesErr{platform: platform}
}

const (
	defaultSchemaPullBackoff     = 250 * time.Millisecond
	defaultMaxSchemaPullAttempts = 5
)

func (p *puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	err = retry(ctx, defaultMaxSchemaPullAttempts, defaultSchemaPullBackoff, func(ctx context.Context) (err error) {
		configJSON, err = blobs.Get(ctx, dgst)
		return err
	})
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		log.G(ctx).Error(err)
		return nil, err
	}

	return configJSON, nil
}
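// Illustrative sketch (not part of the original source): platform selection as
// done by pullManifestList above. filterManifests (defined in the platform-
// specific files of this package) returns matching entries, best match first;
// the requested platform here is a hypothetical example:
//
//	want := ocispec.Platform{OS: "linux", Architecture: "arm64"}
//	matches := filterManifests(mfstList.Manifests, want)
//	// pullManifestList then walks matches in order until one pulls cleanly.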
type noMatchesErr struct {
	platform ocispec.Platform
}

func (e noMatchesErr) Error() string {
	return fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(e.platform))
}

func retry(ctx context.Context, maxAttempts int, sleep time.Duration, f func(ctx context.Context) error) (err error) {
	attempt := 0
	for ; attempt < maxAttempts; attempt++ {
		err = retryOnError(f(ctx))
		if err == nil {
			return nil
		}
		if xfer.IsDoNotRetryError(err) {
			break
		}

		if attempt+1 < maxAttempts {
			timer := time.NewTimer(sleep)
			select {
			case <-ctx.Done():
				timer.Stop()
				return ctx.Err()
			case <-timer.C:
				log.G(ctx).WithError(err).WithField("attempts", attempt+1).Debug("retrying after error")
				sleep *= 2
			}
		}
	}
	return errors.Wrapf(err, "download failed after attempts=%d", attempt+1)
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			log.G(context.TODO()).Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			log.G(context.TODO()).Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
	}
	return m, nil
}
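// Illustrative sketch (not part of the original source): how pullSchema2Config
// uses the retry helper above. With the default 5 attempts and 250ms initial
// backoff, the sleep doubles between attempts: 250ms, 500ms, 1s, 2s:
//
//	err := retry(ctx, defaultMaxSchemaPullAttempts, defaultSchemaPullBackoff,
//		func(ctx context.Context) error {
//			configJSON, err = blobs.Get(ctx, dgst)
//			return err
//		})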
// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}

func createDownloadFile() (*os.File, error) {
	return os.CreateTemp("", "GetImageBlob")
}

func toOCIPlatform(p manifestlist.PlatformSpec) *ocispec.Platform {
	// The distribution pkg doesn't define platform as a pointer, so this hack
	// for the empty struct is necessary. This is temporary until the correct
	// OCI image-spec package is used.
	if p.OS == "" && p.Architecture == "" && p.Variant == "" && p.OSVersion == "" && p.OSFeatures == nil && p.Features == nil {
		return nil
	}
	return &ocispec.Platform{
		OS:           p.OS,
		Architecture: p.Architecture,
		Variant:      p.Variant,
		OSFeatures:   p.OSFeatures,
		OSVersion:    p.OSVersion,
	}
}

// maximumSpec returns the distribution platform with maximum compatibility for the current node.
func maximumSpec() ocispec.Platform {
	p := platforms.DefaultSpec()
	if p.Architecture == "amd64" {
		p.Variant = archvariant.AMD64Variant()
	}
	return p
}
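// Illustrative sketch (not part of the original source): on an amd64 host,
// maximumSpec reports the highest micro-architecture level the CPU supports;
// the variant shown here is a hypothetical, CPU-dependent result:
//
//	p := maximumSpec()
//	// p.OS == "linux", p.Architecture == "amd64", p.Variant == "v3"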