// github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/engine/distribution/pull_v2.go

package distribution // import "github.com/docker/docker/distribution"

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/url"
	"os"
	"runtime"
	"strings"
	"time"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/platforms"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/ocischema"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	v1 "github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}
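// A minimal sketch (hypothetical helper, not part of the original file):
// callers that need to distinguish a config-blob failure from other pull
// errors can unwrap with errors.Cause and type-assert, assuming the error
// was wrapped via github.com/pkg/errors.
func isImageConfigPullError(err error) bool {
	_, ok := errors.Cause(err).(ImageConfigPullError)
	return ok
}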
type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2   bool
	manifestStore *manifestStore
}

func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	p.manifestStore.remote, err = p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	if err = p.pullV2Repository(ctx, ref, platform); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err, p.endpoint.Mirror) {
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref, platform)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef, platform)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)

	return nil
}
type v2LayerDescriptor struct {
	digest            digest.Digest
	diffID            layer.DiffID
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	if ld.diffID != "" {
		return ld.diffID, nil
	}
	return ld.V2MetadataService.GetDiffID(ld.digest)
}
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, io.SeekEnd)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, io.SeekEnd)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, io.SeekStart)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}
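// The resume path above boils down to: append to the partial temp file, seek
// the remote stream past the bytes we already have, and tee every new byte
// through the digest verifier. Note the verifier must already have consumed
// the first offset bytes from earlier attempts, which is why Download keeps
// ld.verifier across retries. A minimal sketch of that core (hypothetical
// helper, not part of the original file):
func resumeDownload(tmp *os.File, remote io.ReadSeeker, verifier digest.Verifier) (int64, error) {
	offset, err := tmp.Seek(0, io.SeekEnd) // resume point = bytes already on disk
	if err != nil {
		return 0, err
	}
	if _, err := remote.Seek(offset, io.SeekStart); err != nil {
		return 0, err
	}
	// Each new byte lands both in the temp file and in the verifier.
	return io.Copy(tmp, io.TeeReader(remote, verifier))
}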
func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}
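// The mapping cached in Registered is what DiffID consults on later pulls:
// given only the registry blob digest, the metadata service can return the
// layer's DiffID without re-downloading the blob. A hedged round-trip sketch
// (hypothetical helper and repository name, not part of the original file):
func cacheAndLookupDiffID(svc metadata.V2MetadataService, diffID layer.DiffID, blobsum digest.Digest) (layer.DiffID, error) {
	if err := svc.Add(diffID, metadata.V2Metadata{Digest: blobsum, SourceRepository: "docker.io/library/example"}); err != nil {
		return "", err
	}
	// GetDiffID reverses the mapping: blob digest -> DiffID.
	return svc.GetDiffID(blobsum)
}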
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) {
	var (
		tagOrDigest string // Used for logging/progress only
		dgst        digest.Digest
		mt          string
		size        int64
		tagged      reference.NamedTagged
		isTagged    bool
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		dgst = digested.Digest()
		tagOrDigest = digested.String()
	} else if tagged, isTagged = ref.(reference.NamedTagged); isTagged {
		tagService := p.repo.Tags(ctx)
		desc, err := tagService.Get(ctx, tagged.Tag())
		if err != nil {
			return false, allowV1Fallback(err)
		}

		dgst = desc.Digest
		tagOrDigest = tagged.Tag()
		mt = desc.MediaType
		size = desc.Size
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	ctx = log.WithLogger(ctx, logrus.WithFields(
		logrus.Fields{
			"digest": dgst,
			"remote": ref,
		}))

	desc := specs.Descriptor{
		MediaType: mt,
		Digest:    dgst,
		Size:      size,
	}

	manifest, err := p.manifestStore.Get(ctx, desc, ref)
	if err != nil {
		if isTagged && isNotFound(errors.Cause(err)) {
			logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag")

			msg := `%s Failed to pull manifest by the resolved digest. This registry does not
	appear to conform to the distribution registry specification; falling back to
	pull by tag. This fallback is DEPRECATED, and will be removed in a future
	release. Please contact admins of %s. %s
`

			warnEmoji := "\U000026A0\U0000FE0F"
			progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji)

			// Fetch by tag worked, but fetch by digest didn't.
			// This is a broken registry implementation.
			// We'll fallback to the old behavior and get the manifest by tag.
			var ms distribution.ManifestService
			ms, err = p.repo.Manifests(ctx)
			if err != nil {
				return false, err
			}

			manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag()))
			err = errors.Wrap(err, "error after falling back to get manifest by tag")
		}
		if err != nil {
			return false, err
		}
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass}
		}
	}

	// If manifestStore.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}

		// give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded a long time ago
		// TODO: condition to be removed
		if reference.Domain(ref) == "docker.io" {
			msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
			logrus.Warn(msg)
			progress.Message(p.config.ProgressOutput, "", msg)
		}

		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *ocischema.DeserializedManifest:
		id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		if err = blobSum.Validate(); err != nil {
			return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum)
		}

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	// The v1 manifest itself doesn't directly contain an OS. However,
	// the history does, but unfortunately that's a string, so search through
	// all the history until hopefully we find one which indicates the OS.
	// supertest2014/nyan is an example of a registry image with schema1.
	configOS := runtime.GOOS
	if system.LCOWSupported() {
		type config struct {
			Os string `json:"os,omitempty"`
		}
		for _, v := range verifiedManifest.History {
			var c config
			if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil {
				if c.Os != "" {
					configOS = c.Os
					break
				}
			}
		}
	}

	// In the situation that the API call didn't specify an OS explicitly, but
	// we support the operating system, switch to that operating system.
	// eg FROM supertest2014/nyan with no platform specifier, and docker build
	// with no --platform= flag under LCOW.
	requestedOS := ""
	if platform != nil {
		requestedOS = platform.OS
	} else if system.IsOSSupported(configOS) {
		requestedOS = configOS
	}

	// Early bath if the requested OS doesn't match that of the configuration.
	// This avoids doing the download, only to potentially fail later.
	if !strings.EqualFold(configOS, requestedOS) {
		return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(ctx, config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}
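// Each schema1 history entry carries raw V1Compatibility JSON such as
// {"id":"4fa6e0...","throwaway":true}; the loop above records history for
// throwaway entries but skips creating download descriptors for them. A
// standalone sketch of that flag check (hypothetical helper, not part of
// the original file):
func isThrowawayLayer(v1Compatibility string) (bool, error) {
	var t struct {
		ThrowAway bool `json:"throwaway,omitempty"`
	}
	err := json.Unmarshal([]byte(v1Compatibility), &t)
	return t.ThrowAway, err
}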
func checkSupportedMediaType(mediaType string) error {
	lowerMt := strings.ToLower(mediaType)
	for _, mt := range supportedMediaTypes {
		// The media type should either be an exact match, or have a valid prefix.
		// We append a "." when matching prefixes to exclude "false positives";
		// for example, we don't want to match "application/vnd.oci.images_are_fun_yolo".
		if lowerMt == mt || strings.HasPrefix(lowerMt, mt+".") {
			return nil
		}
	}
	return unsupportedMediaTypeError{MediaType: mediaType}
}
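// For instance, with an allowed prefix of "application/vnd.oci.image"
// (illustrative value, not necessarily an entry in supportedMediaTypes),
// "application/vnd.oci.image.layer.v1.tar" matches but
// "application/vnd.oci.images_are_fun_yolo" does not, because the prefix
// must be followed by a ".". A minimal sketch of that comparison for a
// single allowed entry (hypothetical helper; allowed is assumed lowercase):
func mediaTypeMatches(mediaType, allowed string) bool {
	mt := strings.ToLower(mediaType)
	return mt == allowed || strings.HasPrefix(mt, allowed+".")
}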
func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) {
	if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, nil
	}

	if err := checkSupportedMediaType(target.MediaType); err != nil {
		return "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range layers {
		if err := d.Digest.Validate(); err != nil {
			return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest)
		}
		if err := checkSupportedMediaType(d.MediaType); err != nil {
			return "", err
		}
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte          // raw serialized image config
		downloadedRootFS *image.RootFS   // rootFS from registered layers
		configRootFS     *image.RootFS   // rootFS from configuration
		release          func()          // release resources from rootFS download
		configPlatform   *specs.Platform // for LCOW when registering downloaded layers
	)

	layerStoreOS := runtime.GOOS
	if platform != nil {
		layerStoreOS = platform.OS
	}

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download, slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err != nil {
			return "", err
		}
		if configRootFS == nil {
			return "", errRootFSInvalid
		}
		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
			return "", err
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}
		if platform == nil {
			// Early bath if the requested OS doesn't match that of the configuration.
			// This avoids doing the download, only to potentially fail later.
			if !system.IsOSSupported(configPlatform.OS) {
				return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
			}
			layerStoreOS = configPlatform.OS
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side loaded
		for i := range descriptors {
			descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(ctx, configJSON)
	if err != nil {
		return "", err
	}

	return imageID, nil
}
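// The concurrency shape above is a two-way join: one goroutine fetches the
// config, an optional goroutine downloads layers, and a config failure
// cancels the shared context so the layer downloads stop early. A
// stripped-down sketch of the same join with hypothetical fetch callbacks
// (not part of the original file):
func pullConfigAndLayers(ctx context.Context, fetchConfig func(context.Context) ([]byte, error), downloadLayers func(context.Context) error) ([]byte, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})

	go func() {
		cfg, err := fetchConfig(ctx)
		if err != nil {
			configErrChan <- err
			cancel() // abort layer downloads; the config error is the one reported
			return
		}
		configChan <- cfg
	}()

	go func() {
		if err := downloadLayers(ctx); err != nil {
			layerErrChan <- err
			return
		}
		close(downloadsDone)
	}()

	// Receive the config (or its error) first, mirroring receiveConfig.
	var cfg []byte
	select {
	case cfg = <-configChan:
	case err := <-configErrChan:
		// Let the layer goroutine settle before returning.
		select {
		case <-downloadsDone:
		case <-layerErrChan:
		}
		return nil, err
	}

	// Then wait for the layer downloads to finish.
	select {
	case <-downloadsDone:
		return cfg, nil
	case err := <-layerErrChan:
		return nil, err
	}
}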
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}
	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
	return id, manifestDigest, err
}

func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}
	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
	return id, manifestDigest, err
}

func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) {
	select {
	case configJSON := <-configChan:
		rootfs, err := s.RootFSFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		platform, err := s.PlatformFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		return configJSON, rootfs, platform, nil
	case err := <-errChan:
		return nil, nil, nil, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in pullSchema2Config.
	}
}
// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var platform specs.Platform
	if pp != nil {
		platform = *pp
	}
	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH)

	manifestMatches := filterManifests(mfstList.Manifests, platform)

	if len(manifestMatches) == 0 {
		errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform))
		logrus.Debug(errMsg)
		return "", "", errors.New(errMsg)
	}

	if len(manifestMatches) > 1 {
		logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
	}
	match := manifestMatches[0]

	if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil {
		return "", "", err
	}

	desc := specs.Descriptor{
		Digest:    match.Digest,
		Size:      match.Size,
		MediaType: match.MediaType,
	}
	manifest, err := p.manifestStore.Get(ctx, desc, ref)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
		logrus.Warn(msg)
		progress.Message(p.config.ProgressOutput, "", msg)

		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	case *ocischema.DeserializedManifest:
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullOCI(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}
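// The actual entry selection lives in filterManifests; as one hedged
// illustration of the same idea, containerd's platforms.Only matcher can
// pick a platform-specific entry from a manifest list (hypothetical helper,
// not the package's real selection logic):
func pickManifest(entries []manifestlist.ManifestDescriptor, want specs.Platform) (manifestlist.ManifestDescriptor, bool) {
	matcher := platforms.Only(want)
	for _, desc := range entries {
		// toOCIPlatform (defined at the bottom of this file) converts the
		// manifest list's PlatformSpec into an OCI platform for matching.
		if matcher.Match(toOCIPlatform(desc.Platform)) {
			return desc, true
		}
	}
	return manifestlist.ManifestDescriptor{}, false
}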
const (
	defaultSchemaPullBackoff     = 250 * time.Millisecond
	defaultMaxSchemaPullAttempts = 5
)

func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	err = retry(ctx, defaultMaxSchemaPullAttempts, defaultSchemaPullBackoff, func(ctx context.Context) (err error) {
		configJSON, err = blobs.Get(ctx, dgst)
		return err
	})
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

func retry(ctx context.Context, maxAttempts int, sleep time.Duration, f func(ctx context.Context) error) (err error) {
	attempt := 0
	for ; attempt < maxAttempts; attempt++ {
		err = retryOnError(f(ctx))
		if err == nil {
			return nil
		}
		if xfer.IsDoNotRetryError(err) {
			break
		}

		if attempt+1 < maxAttempts {
			timer := time.NewTimer(sleep)
			select {
			case <-ctx.Done():
				timer.Stop()
				return ctx.Err()
			case <-timer.C:
				logrus.WithError(err).WithField("attempts", attempt+1).Debug("retrying after error")
				sleep *= 2
			}
		}
	}
	return errors.Wrapf(err, "download failed after attempts=%d", attempt+1)
}
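// With the defaults above, a pull that fails every attempt sleeps 250ms,
// 500ms, 1s, then 2s between its 5 attempts: about 3.75s of backoff in
// total, since no sleep follows the final failure. A small sketch of that
// sum (hypothetical helper, not part of the original file):
func totalBackoff(maxAttempts int, sleep time.Duration) time.Duration {
	var total time.Duration
	for attempt := 0; attempt+1 < maxAttempts; attempt++ {
		total += sleep
		sleep *= 2 // mirrors the doubling in retry
	}
	return total // totalBackoff(5, 250*time.Millisecond) == 3750ms
}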
// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fall back to v1
// (even if confirmedV2 has been set already), and if so, wraps the error in
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
// error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
	}
	return m, nil
}
// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}

func createDownloadFile() (*os.File, error) {
	return os.CreateTemp("", "GetImageBlob")
}

func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform {
	return specs.Platform{
		OS:           p.OS,
		Architecture: p.Architecture,
		Variant:      p.Variant,
		OSFeatures:   p.OSFeatures,
		OSVersion:    p.OSVersion,
	}
}