github.com/rawahars/moby@v24.0.4+incompatible/distribution/pull_v2.go

     1  package distribution // import "github.com/docker/docker/distribution"
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"fmt"
     7  	"io"
     8  	"os"
     9  	"runtime"
    10  	"strings"
    11  	"time"
    12  
    13  	"github.com/containerd/containerd/log"
    14  	"github.com/containerd/containerd/platforms"
    15  	"github.com/docker/distribution"
    16  	"github.com/docker/distribution/manifest/manifestlist"
    17  	"github.com/docker/distribution/manifest/ocischema"
    18  	"github.com/docker/distribution/manifest/schema1"
    19  	"github.com/docker/distribution/manifest/schema2"
    20  	"github.com/docker/distribution/reference"
    21  	"github.com/docker/distribution/registry/client/transport"
    22  	"github.com/docker/docker/distribution/metadata"
    23  	"github.com/docker/docker/distribution/xfer"
    24  	"github.com/docker/docker/image"
    25  	v1 "github.com/docker/docker/image/v1"
    26  	"github.com/docker/docker/layer"
    27  	"github.com/docker/docker/pkg/ioutils"
    28  	"github.com/docker/docker/pkg/progress"
    29  	"github.com/docker/docker/pkg/stringid"
    30  	"github.com/docker/docker/pkg/system"
    31  	refstore "github.com/docker/docker/reference"
    32  	"github.com/docker/docker/registry"
    33  	"github.com/opencontainers/go-digest"
    34  	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    35  	"github.com/pkg/errors"
    36  	"github.com/sirupsen/logrus"
    37  	archvariant "github.com/tonistiigi/go-archvariant"
    38  )
    39  
    40  var (
    41  	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
    42  	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
    43  )
    44  
    45  // imageConfigPullError is an error pulling the image config blob
    46  // (only applies to schema2).
    47  type imageConfigPullError struct {
    48  	Err error
    49  }
    50  
    51  // Error returns the error string for imageConfigPullError.
    52  func (e imageConfigPullError) Error() string {
    53  	return "error pulling image configuration: " + e.Err.Error()
    54  }
    55  
    56  // newPuller returns a puller to pull from a v2 registry.
    57  func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, config *ImagePullConfig, local ContentStore) *puller {
    58  	return &puller{
    59  		metadataService: metadata.NewV2MetadataService(config.MetadataStore),
    60  		endpoint:        endpoint,
    61  		config:          config,
    62  		repoInfo:        repoInfo,
    63  		manifestStore: &manifestStore{
    64  			local: local,
    65  		},
    66  	}
    67  }
    68  
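         // puller pulls a repository's images from a single v2 registry endpoint,
         // storing manifests, layers, and image configs via the stores supplied in
         // ImagePullConfig.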
    69  type puller struct {
    70  	metadataService metadata.V2MetadataService
    71  	endpoint        registry.APIEndpoint
    72  	config          *ImagePullConfig
    73  	repoInfo        *registry.RepositoryInfo
    74  	repo            distribution.Repository
    75  	manifestStore   *manifestStore
    76  }
    77  
    78  func (p *puller) pull(ctx context.Context, ref reference.Named) (err error) {
    79  	// TODO(tiborvass): was ReceiveTimeout
    80  	p.repo, err = newRepository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
    81  	if err != nil {
    82  		logrus.Warnf("Error getting v2 registry: %v", err)
    83  		return err
    84  	}
    85  
    86  	p.manifestStore.remote, err = p.repo.Manifests(ctx)
    87  	if err != nil {
    88  		return err
    89  	}
    90  
    91  	if err = p.pullRepository(ctx, ref); err != nil {
    92  		if _, ok := err.(fallbackError); ok {
    93  			return err
    94  		}
    95  		if continueOnError(err, p.endpoint.Mirror) {
    96  			return fallbackError{
    97  				err:         err,
    98  				transportOK: true,
    99  			}
   100  		}
   101  	}
   102  	return err
   103  }
   104  
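         // pullRepository pulls the single tag or digest named by ref, or every tag
         // in the repository when ref is name-only, and then writes a final status
         // message indicating whether anything new was downloaded.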
   105  func (p *puller) pullRepository(ctx context.Context, ref reference.Named) (err error) {
   106  	var layersDownloaded bool
   107  	if !reference.IsNameOnly(ref) {
   108  		layersDownloaded, err = p.pullTag(ctx, ref, p.config.Platform)
   109  		if err != nil {
   110  			return err
   111  		}
   112  	} else {
   113  		tags, err := p.repo.Tags(ctx).All(ctx)
   114  		if err != nil {
   115  			return err
   116  		}
   117  
   118  		for _, tag := range tags {
   119  			tagRef, err := reference.WithTag(ref, tag)
   120  			if err != nil {
   121  				return err
   122  			}
   123  			pulledNew, err := p.pullTag(ctx, tagRef, p.config.Platform)
   124  			if err != nil {
   125  				// Since this is the pull-all-tags case, don't
   126  				// allow an error pulling a particular tag to
   127  				// make the whole pull fall back to v1.
   128  				if fallbackErr, ok := err.(fallbackError); ok {
   129  					return fallbackErr.err
   130  				}
   131  				return err
   132  			}
   133  			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
    134  			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
   135  			layersDownloaded = layersDownloaded || pulledNew
   136  		}
   137  	}
   138  
   139  	p.writeStatus(reference.FamiliarString(ref), layersDownloaded)
   140  
   141  	return nil
   142  }
   143  
    144  // writeStatus writes a status message to the progress output. If layersDownloaded is true, the
   145  // status message indicates that a newer image was downloaded. Otherwise, it
   146  // indicates that the image is up to date. requestedTag is the tag the message
   147  // will refer to.
   148  func (p *puller) writeStatus(requestedTag string, layersDownloaded bool) {
   149  	if layersDownloaded {
   150  		progress.Message(p.config.ProgressOutput, "", "Status: Downloaded newer image for "+requestedTag)
   151  	} else {
   152  		progress.Message(p.config.ProgressOutput, "", "Status: Image is up to date for "+requestedTag)
   153  	}
   154  }
   155  
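         // layerDescriptor describes one layer blob to be fetched from the remote
         // repository; it implements xfer.DownloadDescriptor for the download manager.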
   156  type layerDescriptor struct {
   157  	digest          digest.Digest
   158  	diffID          layer.DiffID
   159  	repoInfo        *registry.RepositoryInfo
   160  	repo            distribution.Repository
   161  	metadataService metadata.V2MetadataService
   162  	tmpFile         *os.File
   163  	verifier        digest.Verifier
   164  	src             distribution.Descriptor
   165  }
   166  
   167  func (ld *layerDescriptor) Key() string {
   168  	return "v2:" + ld.digest.String()
   169  }
   170  
   171  func (ld *layerDescriptor) ID() string {
   172  	return stringid.TruncateID(ld.digest.String())
   173  }
   174  
   175  func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
   176  	if ld.diffID != "" {
   177  		return ld.diffID, nil
   178  	}
   179  	return ld.metadataService.GetDiffID(ld.digest)
   180  }
   181  
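         // Download fetches the layer blob into a temporary file, tee-ing the bytes
         // through a digest verifier as they arrive. If a partial temporary file is
         // left over from a previous attempt, it tries to resume from the current
         // offset via a range request, starting over when the server can't honor it.
         // On success it hands the temporary file off to the caller as a ReadCloser
         // together with the blob size.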
   182  func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
   183  	logrus.Debugf("pulling blob %q", ld.digest)
   184  
   185  	var (
   186  		err    error
   187  		offset int64
   188  	)
   189  
   190  	if ld.tmpFile == nil {
   191  		ld.tmpFile, err = createDownloadFile()
   192  		if err != nil {
   193  			return nil, 0, xfer.DoNotRetry{Err: err}
   194  		}
   195  	} else {
   196  		offset, err = ld.tmpFile.Seek(0, io.SeekEnd)
   197  		if err != nil {
   198  			logrus.Debugf("error seeking to end of download file: %v", err)
   199  			offset = 0
   200  
   201  			ld.tmpFile.Close()
   202  			if err := os.Remove(ld.tmpFile.Name()); err != nil {
   203  				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
   204  			}
   205  			ld.tmpFile, err = createDownloadFile()
   206  			if err != nil {
   207  				return nil, 0, xfer.DoNotRetry{Err: err}
   208  			}
   209  		} else if offset != 0 {
   210  			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
   211  		}
   212  	}
   213  
   214  	tmpFile := ld.tmpFile
   215  
   216  	layerDownload, err := ld.open(ctx)
   217  	if err != nil {
   218  		logrus.Errorf("Error initiating layer download: %v", err)
   219  		return nil, 0, retryOnError(err)
   220  	}
   221  
   222  	if offset != 0 {
   223  		_, err := layerDownload.Seek(offset, io.SeekStart)
   224  		if err != nil {
   225  			if err := ld.truncateDownloadFile(); err != nil {
   226  				return nil, 0, xfer.DoNotRetry{Err: err}
   227  			}
   228  			return nil, 0, err
   229  		}
   230  	}
   231  	size, err := layerDownload.Seek(0, io.SeekEnd)
   232  	if err != nil {
   233  		// Seek failed, perhaps because there was no Content-Length
   234  		// header. This shouldn't fail the download, because we can
   235  		// still continue without a progress bar.
   236  		size = 0
   237  	} else {
   238  		if size != 0 && offset > size {
   239  			logrus.Debug("Partial download is larger than full blob. Starting over")
   240  			offset = 0
   241  			if err := ld.truncateDownloadFile(); err != nil {
   242  				return nil, 0, xfer.DoNotRetry{Err: err}
   243  			}
   244  		}
   245  
   246  		// Restore the seek offset either at the beginning of the
   247  		// stream, or just after the last byte we have from previous
   248  		// attempts.
   249  		_, err = layerDownload.Seek(offset, io.SeekStart)
   250  		if err != nil {
   251  			return nil, 0, err
   252  		}
   253  	}
   254  
   255  	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
   256  	defer reader.Close()
   257  
   258  	if ld.verifier == nil {
   259  		ld.verifier = ld.digest.Verifier()
   260  	}
   261  
   262  	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
   263  	if err != nil {
   264  		if err == transport.ErrWrongCodeForByteRange {
   265  			if err := ld.truncateDownloadFile(); err != nil {
   266  				return nil, 0, xfer.DoNotRetry{Err: err}
   267  			}
   268  			return nil, 0, err
   269  		}
   270  		return nil, 0, retryOnError(err)
   271  	}
   272  
   273  	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")
   274  
   275  	if !ld.verifier.Verified() {
   276  		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
   277  		logrus.Error(err)
   278  
   279  		// Allow a retry if this digest verification error happened
   280  		// after a resumed download.
   281  		if offset != 0 {
   282  			if err := ld.truncateDownloadFile(); err != nil {
   283  				return nil, 0, xfer.DoNotRetry{Err: err}
   284  			}
   285  
   286  			return nil, 0, err
   287  		}
   288  		return nil, 0, xfer.DoNotRetry{Err: err}
   289  	}
   290  
   291  	progress.Update(progressOutput, ld.ID(), "Download complete")
   292  
   293  	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())
   294  
   295  	_, err = tmpFile.Seek(0, io.SeekStart)
   296  	if err != nil {
   297  		tmpFile.Close()
   298  		if err := os.Remove(tmpFile.Name()); err != nil {
   299  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   300  		}
   301  		ld.tmpFile = nil
   302  		ld.verifier = nil
   303  		return nil, 0, xfer.DoNotRetry{Err: err}
   304  	}
   305  
   306  	// hand off the temporary file to the download manager, so it will only
   307  	// be closed once
   308  	ld.tmpFile = nil
   309  
   310  	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
   311  		tmpFile.Close()
   312  		err := os.RemoveAll(tmpFile.Name())
   313  		if err != nil {
   314  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   315  		}
   316  		return err
   317  	}), size, nil
   318  }
   319  
   320  func (ld *layerDescriptor) Close() {
   321  	if ld.tmpFile != nil {
   322  		ld.tmpFile.Close()
   323  		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
   324  			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
   325  		}
   326  	}
   327  }
   328  
   329  func (ld *layerDescriptor) truncateDownloadFile() error {
   330  	// Need a new hash context since we will be redoing the download
   331  	ld.verifier = nil
   332  
   333  	if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil {
   334  		logrus.Errorf("error seeking to beginning of download file: %v", err)
   335  		return err
   336  	}
   337  
   338  	if err := ld.tmpFile.Truncate(0); err != nil {
   339  		logrus.Errorf("error truncating download file: %v", err)
   340  		return err
   341  	}
   342  
   343  	return nil
   344  }
   345  
   346  func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
   347  	// Cache mapping from this layer's DiffID to the blobsum
   348  	_ = ld.metadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
   349  }
   350  
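         // pullTag resolves ref to a manifest (preferring pull-by-digest and falling
         // back to pull-by-tag for non-conformant registries), dispatches on the
         // manifest type (schema1, schema2, OCI, or manifest list), and records the
         // resulting image ID in the reference store. tagUpdated reports whether new
         // layers were downloaded or an existing image was newly tagged.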
   351  func (p *puller) pullTag(ctx context.Context, ref reference.Named, platform *ocispec.Platform) (tagUpdated bool, err error) {
   352  	var (
   353  		tagOrDigest string // Used for logging/progress only
   354  		dgst        digest.Digest
   355  		mt          string
   356  		size        int64
   357  		tagged      reference.NamedTagged
   358  		isTagged    bool
   359  	)
   360  	if digested, isDigested := ref.(reference.Canonical); isDigested {
   361  		dgst = digested.Digest()
   362  		tagOrDigest = digested.String()
   363  	} else if tagged, isTagged = ref.(reference.NamedTagged); isTagged {
   364  		tagService := p.repo.Tags(ctx)
   365  		desc, err := tagService.Get(ctx, tagged.Tag())
   366  		if err != nil {
   367  			return false, err
   368  		}
   369  
   370  		dgst = desc.Digest
   371  		tagOrDigest = tagged.Tag()
   372  		mt = desc.MediaType
   373  		size = desc.Size
   374  	} else {
   375  		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
   376  	}
   377  
   378  	ctx = log.WithLogger(ctx, logrus.WithFields(
   379  		logrus.Fields{
   380  			"digest": dgst,
   381  			"remote": ref,
   382  		}))
   383  
   384  	desc := ocispec.Descriptor{
   385  		MediaType: mt,
   386  		Digest:    dgst,
   387  		Size:      size,
   388  	}
   389  
   390  	manifest, err := p.manifestStore.Get(ctx, desc, ref)
   391  	if err != nil {
   392  		if isTagged && isNotFound(errors.Cause(err)) {
   393  			logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag")
   394  
   395  			msg := `%s Failed to pull manifest by the resolved digest. This registry does not
   396  	appear to conform to the distribution registry specification; falling back to
   397  	pull by tag.  This fallback is DEPRECATED, and will be removed in a future
   398  	release.  Please contact admins of %s. %s
   399  `
   400  
   401  			warnEmoji := "\U000026A0\U0000FE0F"
   402  			progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji)
   403  
   404  			// Fetch by tag worked, but fetch by digest didn't.
   405  			// This is a broken registry implementation.
    406  			// We'll fall back to the old behavior and get the manifest by tag.
   407  			var ms distribution.ManifestService
   408  			ms, err = p.repo.Manifests(ctx)
   409  			if err != nil {
   410  				return false, err
   411  			}
   412  
   413  			manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag()))
   414  			err = errors.Wrap(err, "error after falling back to get manifest by tag")
   415  		}
   416  		if err != nil {
   417  			return false, err
   418  		}
   419  	}
   420  
   421  	if manifest == nil {
   422  		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
   423  	}
   424  
   425  	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
   426  		if err := p.validateMediaType(m.Manifest.Config.MediaType); err != nil {
   427  			return false, err
   428  		}
   429  	}
   430  
   431  	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
   432  	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))
   433  
   434  	var (
   435  		id             digest.Digest
   436  		manifestDigest digest.Digest
   437  	)
   438  
   439  	switch v := manifest.(type) {
   440  	case *schema1.SignedManifest:
    441  		// Give registries time to upgrade to schema2, and only warn if we know a registry was upgraded a long time ago.
   442  		// TODO: condition to be removed
   443  		if reference.Domain(ref) == "docker.io" {
   444  			msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
   445  			logrus.Warn(msg)
   446  			progress.Message(p.config.ProgressOutput, "", msg)
   447  		}
   448  
   449  		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
   450  		if err != nil {
   451  			return false, err
   452  		}
   453  	case *schema2.DeserializedManifest:
   454  		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
   455  		if err != nil {
   456  			return false, err
   457  		}
   458  	case *ocischema.DeserializedManifest:
   459  		id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
   460  		if err != nil {
   461  			return false, err
   462  		}
   463  	case *manifestlist.DeserializedManifestList:
   464  		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
   465  		if err != nil {
   466  			return false, err
   467  		}
   468  	default:
   469  		return false, invalidManifestFormatError{}
   470  	}
   471  
   472  	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
   473  
   474  	if p.config.ReferenceStore != nil {
   475  		oldTagID, err := p.config.ReferenceStore.Get(ref)
   476  		if err == nil {
   477  			if oldTagID == id {
   478  				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
   479  			}
   480  		} else if err != refstore.ErrDoesNotExist {
   481  			return false, err
   482  		}
   483  
   484  		if canonical, ok := ref.(reference.Canonical); ok {
   485  			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
   486  				return false, err
   487  			}
   488  		} else {
   489  			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
   490  				return false, err
   491  			}
   492  			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
   493  				return false, err
   494  			}
   495  		}
   496  	}
   497  	return true, nil
   498  }
   499  
    500  // validateMediaType validates whether the given mediaType is accepted by the
    501  // puller's configuration.
   502  func (p *puller) validateMediaType(mediaType string) error {
   503  	var allowedMediaTypes []string
   504  	if len(p.config.Schema2Types) > 0 {
   505  		allowedMediaTypes = p.config.Schema2Types
   506  	} else {
   507  		allowedMediaTypes = defaultImageTypes
   508  	}
   509  	for _, t := range allowedMediaTypes {
   510  		if mediaType == t {
   511  			return nil
   512  		}
   513  	}
   514  
   515  	configClass := mediaTypeClasses[mediaType]
   516  	if configClass == "" {
   517  		configClass = "unknown"
   518  	}
   519  	return invalidManifestClassError{mediaType, configClass}
   520  }
   521  
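         // pullSchema1 verifies a legacy schema1 manifest, downloads its layers
         // bottom-most first, and reconstructs the image config from the manifest's
         // v1-compatibility history entries.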
   522  func (p *puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
   523  	if platform != nil {
    524  		// Bail out early if the requested OS doesn't match that of the configuration.
   525  		// This avoids doing the download, only to potentially fail later.
   526  		if !system.IsOSSupported(platform.OS) {
   527  			return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", runtime.GOOS, platform.OS)
   528  		}
   529  	}
   530  
   531  	var verifiedManifest *schema1.Manifest
   532  	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
   533  	if err != nil {
   534  		return "", "", err
   535  	}
   536  
   537  	rootFS := image.NewRootFS()
   538  
   539  	// remove duplicate layers and check parent chain validity
   540  	err = fixManifestLayers(verifiedManifest)
   541  	if err != nil {
   542  		return "", "", err
   543  	}
   544  
   545  	var descriptors []xfer.DownloadDescriptor
   546  
   547  	// Image history converted to the new format
   548  	var history []image.History
   549  
   550  	// Note that the order of this loop is in the direction of bottom-most
   551  	// to top-most, so that the downloads slice gets ordered correctly.
   552  	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
   553  		blobSum := verifiedManifest.FSLayers[i].BlobSum
   554  		if err = blobSum.Validate(); err != nil {
   555  			return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum)
   556  		}
   557  
   558  		var throwAway struct {
   559  			ThrowAway bool `json:"throwaway,omitempty"`
   560  		}
   561  		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
   562  			return "", "", err
   563  		}
   564  
   565  		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
   566  		if err != nil {
   567  			return "", "", err
   568  		}
   569  		history = append(history, h)
   570  
   571  		if throwAway.ThrowAway {
   572  			continue
   573  		}
   574  
   575  		layerDescriptor := &layerDescriptor{
   576  			digest:          blobSum,
   577  			repoInfo:        p.repoInfo,
   578  			repo:            p.repo,
   579  			metadataService: p.metadataService,
   580  		}
   581  
   582  		descriptors = append(descriptors, layerDescriptor)
   583  	}
   584  
   585  	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
   586  	if err != nil {
   587  		return "", "", err
   588  	}
   589  	defer release()
   590  
   591  	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
   592  	if err != nil {
   593  		return "", "", err
   594  	}
   595  
   596  	imageID, err := p.config.ImageStore.Put(ctx, config)
   597  	if err != nil {
   598  		return "", "", err
   599  	}
   600  
   601  	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
   602  
   603  	return imageID, manifestDigest, nil
   604  }
   605  
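         // checkSupportedMediaType returns nil if mediaType (compared
         // case-insensitively) is in the package-level supportedMediaTypes list,
         // either as an exact match or as a "."-delimited prefix match, and an
         // unsupportedMediaTypeError otherwise. For example (hypothetical values,
         // assuming "application/vnd.oci.image" is in supportedMediaTypes):
         //
         //	checkSupportedMediaType("application/vnd.oci.image.manifest.v1+json") // nil: prefix match
         //	checkSupportedMediaType("application/vnd.oci.images_are_fun_yolo")    // unsupportedMediaTypeError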
   606  func checkSupportedMediaType(mediaType string) error {
   607  	lowerMt := strings.ToLower(mediaType)
   608  	for _, mt := range supportedMediaTypes {
    609  		// The media type should either be an exact match, or have a valid prefix.
    610  		// We append a "." when matching prefixes to exclude "false positives";
   611  		// for example, we don't want to match "application/vnd.oci.images_are_fun_yolo".
   612  		if lowerMt == mt || strings.HasPrefix(lowerMt, mt+".") {
   613  			return nil
   614  		}
   615  	}
   616  	return unsupportedMediaTypeError{MediaType: mediaType}
   617  }
   618  
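         // pullSchema2Layers downloads the image config blob and the layer blobs
         // concurrently, then cross-checks the DiffIDs recorded in the config
         // against those of the registered layers before storing the config as the
         // image. The returned id is the stored image's ID (the config digest).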
   619  func (p *puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *ocispec.Platform) (id digest.Digest, err error) {
   620  	if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil {
   621  		// If the image already exists locally, no need to pull
   622  		// anything.
   623  		return target.Digest, nil
   624  	}
   625  
   626  	if err := checkSupportedMediaType(target.MediaType); err != nil {
   627  		return "", err
   628  	}
   629  
   630  	var descriptors []xfer.DownloadDescriptor
   631  
   632  	// Note that the order of this loop is in the direction of bottom-most
   633  	// to top-most, so that the downloads slice gets ordered correctly.
   634  	for _, d := range layers {
   635  		if err := d.Digest.Validate(); err != nil {
   636  			return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest)
   637  		}
   638  		if err := checkSupportedMediaType(d.MediaType); err != nil {
   639  			return "", err
   640  		}
   641  		layerDescriptor := &layerDescriptor{
   642  			digest:          d.Digest,
   643  			repo:            p.repo,
   644  			repoInfo:        p.repoInfo,
   645  			metadataService: p.metadataService,
   646  			src:             d,
   647  		}
   648  
   649  		descriptors = append(descriptors, layerDescriptor)
   650  	}
   651  
   652  	configChan := make(chan []byte, 1)
   653  	configErrChan := make(chan error, 1)
   654  	layerErrChan := make(chan error, 1)
   655  	downloadsDone := make(chan struct{})
   656  	var cancel func()
   657  	ctx, cancel = context.WithCancel(ctx)
   658  	defer cancel()
   659  
   660  	// Pull the image config
   661  	go func() {
   662  		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
   663  		if err != nil {
   664  			configErrChan <- imageConfigPullError{Err: err}
   665  			cancel()
   666  			return
   667  		}
   668  		configChan <- configJSON
   669  	}()
   670  
   671  	var (
   672  		configJSON       []byte            // raw serialized image config
   673  		downloadedRootFS *image.RootFS     // rootFS from registered layers
   674  		configRootFS     *image.RootFS     // rootFS from configuration
   675  		release          func()            // release resources from rootFS download
   676  		configPlatform   *ocispec.Platform // for LCOW when registering downloaded layers
   677  	)
   678  
   679  	layerStoreOS := runtime.GOOS
   680  	if platform != nil {
   681  		layerStoreOS = platform.OS
   682  	}
   683  
   684  	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
   685  	// explicitly blocking images intended for linux from the Windows daemon. On
    686  	// Windows, we do this before the attempt to download, effectively serialising
    687  	// the download and slightly slowing it down. We have to do it this way, as
    688  	// chances are the layer download itself would fail due to file names
   689  	// which aren't suitable for NTFS. At some point in the future, if a similar
   690  	// check to block Windows images being pulled on Linux is implemented, it
   691  	// may be necessary to perform the same type of serialisation.
   692  	if runtime.GOOS == "windows" {
   693  		configJSON, configRootFS, configPlatform, err = receiveConfig(configChan, configErrChan)
   694  		if err != nil {
   695  			return "", err
   696  		}
   697  		if configRootFS == nil {
   698  			return "", errRootFSInvalid
   699  		}
   700  		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
   701  			return "", err
   702  		}
   703  
   704  		if len(descriptors) != len(configRootFS.DiffIDs) {
   705  			return "", errRootFSMismatch
   706  		}
   707  		if platform == nil {
    708  			// Bail out early if the requested OS doesn't match that of the configuration.
   709  			// This avoids doing the download, only to potentially fail later.
   710  			if !system.IsOSSupported(configPlatform.OS) {
   711  				return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
   712  			}
   713  			layerStoreOS = configPlatform.OS
   714  		}
   715  
    716  		// Populate diff IDs in descriptors to avoid downloading foreign layers
    717  		// which have been side-loaded.
   718  		for i := range descriptors {
   719  			descriptors[i].(*layerDescriptor).diffID = configRootFS.DiffIDs[i]
   720  		}
   721  	}
   722  
   723  	// Assume that the operating system is the host OS if blank, and validate it
   724  	// to ensure we don't cause a panic by an invalid index into the layerstores.
   725  	if layerStoreOS != "" && !system.IsOSSupported(layerStoreOS) {
   726  		return "", system.ErrNotSupportedOperatingSystem
   727  	}
   728  
   729  	if p.config.DownloadManager != nil {
   730  		go func() {
   731  			var (
   732  				err    error
   733  				rootFS image.RootFS
   734  			)
   735  			downloadRootFS := *image.NewRootFS()
   736  			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
   737  			if err != nil {
   738  				// Intentionally do not cancel the config download here
   739  				// as the error from config download (if there is one)
   740  				// is more interesting than the layer download error
   741  				layerErrChan <- err
   742  				return
   743  			}
   744  
   745  			downloadedRootFS = &rootFS
   746  			close(downloadsDone)
   747  		}()
   748  	} else {
   749  		// We have nothing to download
   750  		close(downloadsDone)
   751  	}
   752  
   753  	if configJSON == nil {
   754  		configJSON, configRootFS, _, err = receiveConfig(configChan, configErrChan)
   755  		if err == nil && configRootFS == nil {
   756  			err = errRootFSInvalid
   757  		}
   758  		if err != nil {
   759  			cancel()
   760  			select {
   761  			case <-downloadsDone:
   762  			case <-layerErrChan:
   763  			}
   764  			return "", err
   765  		}
   766  	}
   767  
   768  	select {
   769  	case <-downloadsDone:
   770  	case err = <-layerErrChan:
   771  		return "", err
   772  	}
   773  
   774  	if release != nil {
   775  		defer release()
   776  	}
   777  
   778  	if downloadedRootFS != nil {
   779  		// The DiffIDs returned in rootFS MUST match those in the config.
   780  		// Otherwise the image config could be referencing layers that aren't
   781  		// included in the manifest.
   782  		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
   783  			return "", errRootFSMismatch
   784  		}
   785  
   786  		for i := range downloadedRootFS.DiffIDs {
   787  			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
   788  				return "", errRootFSMismatch
   789  			}
   790  		}
   791  	}
   792  
   793  	imageID, err := p.config.ImageStore.Put(ctx, configJSON)
   794  	if err != nil {
   795  		return "", err
   796  	}
   797  
   798  	return imageID, nil
   799  }
   800  
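         // pullSchema2 computes (and, when pulling by digest, verifies) the manifest
         // digest, then pulls the config and layers the manifest references.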
   801  func (p *puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
   802  	manifestDigest, err = schema2ManifestDigest(ref, mfst)
   803  	if err != nil {
   804  		return "", "", err
   805  	}
   806  	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
   807  	return id, manifestDigest, err
   808  }
   809  
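         // pullOCI is the OCI-manifest counterpart of pullSchema2; both formats
         // share pullSchema2Layers for the actual download.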
   810  func (p *puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
   811  	manifestDigest, err = schema2ManifestDigest(ref, mfst)
   812  	if err != nil {
   813  		return "", "", err
   814  	}
   815  	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
   816  	return id, manifestDigest, err
   817  }
   818  
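         // receiveConfig blocks until the config-pulling goroutine delivers either
         // the raw config JSON or an error, and on success derives the rootfs and
         // platform from the config.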
   819  func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *ocispec.Platform, error) {
   820  	select {
   821  	case configJSON := <-configChan:
   822  		rootfs, err := rootFSFromConfig(configJSON)
   823  		if err != nil {
   824  			return nil, nil, nil, err
   825  		}
   826  		platform, err := platformFromConfig(configJSON)
   827  		if err != nil {
   828  			return nil, nil, nil, err
   829  		}
   830  		return configJSON, rootfs, platform, nil
   831  	case err := <-errChan:
   832  		return nil, nil, nil, err
   833  		// Don't need a case for ctx.Done in the select because cancellation
    834  		// will trigger an error in p.pullSchema2Config.
   835  	}
   836  }
   837  
   838  // pullManifestList handles "manifest lists" which point to various
   839  // platform-specific manifests.
   840  func (p *puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *ocispec.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) {
   841  	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
   842  	if err != nil {
   843  		return "", "", err
   844  	}
   845  
   846  	var platform ocispec.Platform
   847  	if pp != nil {
   848  		platform = *pp
   849  	}
   850  	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s match", ref, len(mfstList.Manifests), platforms.Format(platform))
   851  
   852  	manifestMatches := filterManifests(mfstList.Manifests, platform)
   853  
   854  	for _, match := range manifestMatches {
   855  		if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil {
   856  			return "", "", err
   857  		}
   858  
   859  		desc := ocispec.Descriptor{
   860  			Digest:    match.Digest,
   861  			Size:      match.Size,
   862  			MediaType: match.MediaType,
   863  		}
   864  		manifest, err := p.manifestStore.Get(ctx, desc, ref)
   865  		if err != nil {
   866  			return "", "", err
   867  		}
   868  
   869  		manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest)
   870  		if err != nil {
   871  			return "", "", err
   872  		}
   873  
   874  		switch v := manifest.(type) {
   875  		case *schema1.SignedManifest:
   876  			msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
   877  			logrus.Warn(msg)
   878  			progress.Message(p.config.ProgressOutput, "", msg)
   879  
   880  			platform := toOCIPlatform(match.Platform)
   881  			id, _, err = p.pullSchema1(ctx, manifestRef, v, platform)
   882  			if err != nil {
   883  				return "", "", err
   884  			}
   885  		case *schema2.DeserializedManifest:
   886  			platform := toOCIPlatform(match.Platform)
   887  			id, _, err = p.pullSchema2(ctx, manifestRef, v, platform)
   888  			if err != nil {
   889  				return "", "", err
   890  			}
   891  		case *ocischema.DeserializedManifest:
   892  			platform := toOCIPlatform(match.Platform)
   893  			id, _, err = p.pullOCI(ctx, manifestRef, v, platform)
   894  			if err != nil {
   895  				return "", "", err
   896  			}
   897  		case *manifestlist.DeserializedManifestList:
   898  			id, _, err = p.pullManifestList(ctx, manifestRef, v, pp)
   899  			if err != nil {
   900  				var noMatches noMatchesErr
   901  				if !errors.As(err, &noMatches) {
   902  					// test the next match
   903  					continue
   904  				}
   905  			}
   906  		default:
    907  			// The OCI spec requires skipping unknown manifest types.
   908  			continue
   909  		}
   910  		return id, manifestListDigest, err
   911  	}
   912  	return "", "", noMatchesErr{platform: platform}
   913  }
   914  
   915  const (
   916  	defaultSchemaPullBackoff     = 250 * time.Millisecond
   917  	defaultMaxSchemaPullAttempts = 5
   918  )
   919  
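         // pullSchema2Config fetches the image config blob for dgst from the blob
         // store, retrying with backoff, and verifies the received bytes against the
         // digest before returning them.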
   920  func (p *puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
   921  	blobs := p.repo.Blobs(ctx)
   922  	err = retry(ctx, defaultMaxSchemaPullAttempts, defaultSchemaPullBackoff, func(ctx context.Context) (err error) {
   923  		configJSON, err = blobs.Get(ctx, dgst)
   924  		return err
   925  	})
   926  	if err != nil {
   927  		return nil, err
   928  	}
   929  
   930  	// Verify image config digest
   931  	verifier := dgst.Verifier()
   932  	if _, err := verifier.Write(configJSON); err != nil {
   933  		return nil, err
   934  	}
   935  	if !verifier.Verified() {
   936  		err := fmt.Errorf("image config verification failed for digest %s", dgst)
   937  		logrus.Error(err)
   938  		return nil, err
   939  	}
   940  
   941  	return configJSON, nil
   942  }
   943  
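         // noMatchesErr is returned when no entry in a manifest list matches the
         // requested platform.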
   944  type noMatchesErr struct {
   945  	platform ocispec.Platform
   946  }
   947  
   948  func (e noMatchesErr) Error() string {
   949  	return fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(e.platform))
   950  }
   951  
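         // retry runs f up to maxAttempts times, sleeping between attempts and
         // doubling the sleep after each failure. Errors wrapped in xfer.DoNotRetry
         // stop the loop immediately, and context cancellation aborts the wait. A
         // minimal usage sketch, mirroring pullSchema2Config above (blobs and dgst
         // stand in for a real blob store and digest):
         //
         //	err := retry(ctx, defaultMaxSchemaPullAttempts, defaultSchemaPullBackoff, func(ctx context.Context) error {
         //		_, err := blobs.Get(ctx, dgst)
         //		return err
         //	})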
   952  func retry(ctx context.Context, maxAttempts int, sleep time.Duration, f func(ctx context.Context) error) (err error) {
   953  	attempt := 0
   954  	for ; attempt < maxAttempts; attempt++ {
   955  		err = retryOnError(f(ctx))
   956  		if err == nil {
   957  			return nil
   958  		}
   959  		if xfer.IsDoNotRetryError(err) {
   960  			break
   961  		}
   962  
   963  		if attempt+1 < maxAttempts {
   964  			timer := time.NewTimer(sleep)
   965  			select {
   966  			case <-ctx.Done():
   967  				timer.Stop()
   968  				return ctx.Err()
   969  			case <-timer.C:
   970  				logrus.WithError(err).WithField("attempts", attempt+1).Debug("retrying after error")
   971  				sleep *= 2
   972  			}
   973  		}
   974  	}
   975  	return errors.Wrapf(err, "download failed after attempts=%d", attempt+1)
   976  }
   977  
   978  // schema2ManifestDigest computes the manifest digest, and, if pulling by
   979  // digest, ensures that it matches the requested digest.
   980  func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
   981  	_, canonical, err := mfst.Payload()
   982  	if err != nil {
   983  		return "", err
   984  	}
   985  
   986  	// If pull by digest, then verify the manifest digest.
   987  	if digested, isDigested := ref.(reference.Canonical); isDigested {
   988  		verifier := digested.Digest().Verifier()
   989  		if _, err := verifier.Write(canonical); err != nil {
   990  			return "", err
   991  		}
   992  		if !verifier.Verified() {
   993  			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
   994  			logrus.Error(err)
   995  			return "", err
   996  		}
   997  		return digested.Digest(), nil
   998  	}
   999  
  1000  	return digest.FromBytes(canonical), nil
  1001  }
  1002  
  1003  func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
  1004  	// If pull by digest, then verify the manifest digest. NOTE: It is
  1005  	// important to do this first, before any other content validation. If the
  1006  	// digest cannot be verified, don't even bother with those other things.
  1007  	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
  1008  		verifier := digested.Digest().Verifier()
  1009  		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
  1010  			return nil, err
  1011  		}
  1012  		if !verifier.Verified() {
  1013  			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
  1014  			logrus.Error(err)
  1015  			return nil, err
  1016  		}
  1017  	}
  1018  	m = &signedManifest.Manifest
  1019  
  1020  	if m.SchemaVersion != 1 {
  1021  		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
  1022  	}
  1023  	if len(m.FSLayers) != len(m.History) {
  1024  		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
  1025  	}
  1026  	if len(m.FSLayers) == 0 {
  1027  		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
  1028  	}
  1029  	return m, nil
  1030  }
  1031  
  1032  // fixManifestLayers removes repeated layers from the manifest and checks the
  1033  // correctness of the parent chain.
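         //
         // For example, two adjacent history entries that share the same ID are
         // collapsed into one, while a repeated ID elsewhere in the chain or a
         // parent field that doesn't point at the next entry is reported as an error.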
  1034  func fixManifestLayers(m *schema1.Manifest) error {
  1035  	imgs := make([]*image.V1Image, len(m.FSLayers))
  1036  	for i := range m.FSLayers {
  1037  		img := &image.V1Image{}
  1038  
  1039  		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
  1040  			return err
  1041  		}
  1042  
  1043  		imgs[i] = img
  1044  		if err := v1.ValidateID(img.ID); err != nil {
  1045  			return err
  1046  		}
  1047  	}
  1048  
  1049  	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
   1050  		// A Windows base layer can point to a parent that is not in the manifest.
  1051  		return errors.New("invalid parent ID in the base layer of the image")
  1052  	}
  1053  
   1054  	// Check for duplicate IDs up front so we return an error instead of deadlocking.
  1055  	idmap := make(map[string]struct{})
  1056  
  1057  	var lastID string
  1058  	for _, img := range imgs {
   1059  		// Skip IDs that appear consecutively; we handle those later.
  1060  		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
  1061  			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
  1062  		}
  1063  		lastID = img.ID
  1064  		idmap[lastID] = struct{}{}
  1065  	}
  1066  
   1067  	// Iterate backwards so the remaining indexes stay valid after removing items.
  1068  	for i := len(imgs) - 2; i >= 0; i-- {
  1069  		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
  1070  			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
  1071  			m.History = append(m.History[:i], m.History[i+1:]...)
  1072  		} else if imgs[i].Parent != imgs[i+1].ID {
  1073  			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
  1074  		}
  1075  	}
  1076  
  1077  	return nil
  1078  }
  1079  
  1080  func createDownloadFile() (*os.File, error) {
  1081  	return os.CreateTemp("", "GetImageBlob")
  1082  }
  1083  
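         // toOCIPlatform converts a manifest list PlatformSpec into an OCI platform,
         // returning nil when the spec is entirely empty.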
  1084  func toOCIPlatform(p manifestlist.PlatformSpec) *ocispec.Platform {
   1085  	// The distribution pkg doesn't define platform as a pointer, so this hack for the
   1086  	// empty struct is necessary. This is temporary until the correct OCI image-spec package is used.
  1087  	if p.OS == "" && p.Architecture == "" && p.Variant == "" && p.OSVersion == "" && p.OSFeatures == nil && p.Features == nil {
  1088  		return nil
  1089  	}
  1090  	return &ocispec.Platform{
  1091  		OS:           p.OS,
  1092  		Architecture: p.Architecture,
  1093  		Variant:      p.Variant,
  1094  		OSFeatures:   p.OSFeatures,
  1095  		OSVersion:    p.OSVersion,
  1096  	}
  1097  }
  1098  
  1099  // maximumSpec returns the distribution platform with maximum compatibility for the current node.
  1100  func maximumSpec() ocispec.Platform {
  1101  	p := platforms.DefaultSpec()
  1102  	if p.Architecture == "amd64" {
  1103  		p.Variant = archvariant.AMD64Variant()
  1104  	}
  1105  	return p
  1106  }