github.com/lacework-dev/go-moby@v20.10.12+incompatible/distribution/pull_v2.go

package distribution // import "github.com/docker/docker/distribution"

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"runtime"
	"strings"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/platforms"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/ocischema"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	v1 "github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

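// v2Puller pulls images from a registry speaking the v2 (distribution)
// protocol. Its fields are expected to be populated by the caller before
// Pull is invoked.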
type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2   bool
	manifestStore *manifestStore
}

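// Pull establishes a connection to the v2 registry and pulls the image named
// by ref, optionally constrained to the given platform. Errors that permit a
// retry over the v1 protocol are wrapped in fallbackError.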
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	p.manifestStore.remote, err = p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	if err = p.pullV2Repository(ctx, ref, platform); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err, p.endpoint.Mirror) {
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

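// pullV2Repository pulls a single tag or digest when ref names one, or every
// tag in the repository when ref carries a name only. It then writes a final
// status line reflecting whether anything new was downloaded or tagged.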
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref, platform)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef, platform)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)

	return nil
}

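// v2LayerDescriptor describes a layer blob to be fetched from a v2 registry,
// carrying enough state (temp file, digest verifier) to resume an interrupted
// download. It satisfies the xfer.DownloadDescriptor interface consumed by
// the download manager.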
type v2LayerDescriptor struct {
	digest            digest.Digest
	diffID            layer.DiffID
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	if ld.diffID != "" {
		return ld.diffID, nil
	}
	return ld.V2MetadataService.GetDiffID(ld.digest)
}

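// Download fetches the blob into a temporary file, resuming from a previous
// partial download when the registry supports byte ranges, and verifies the
// content digest as it streams. On success it hands back a ReadCloser over
// the verified data together with the blob size; closing the reader removes
// the temporary file.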
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, io.SeekEnd)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, io.SeekEnd)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, io.SeekStart)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}

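// Close discards any in-progress temporary download file.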
func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

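// truncateDownloadFile rewinds and truncates the temporary file and drops the
// digest verifier so the next attempt starts the download from scratch.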
func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}

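// pullV2Tag resolves ref to a manifest, validates its media type, and
// dispatches to the schema-specific pull path (schema1, schema2, OCI, or
// manifest list). It reports whether the local tag was created or updated.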
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) {
	var (
		tagOrDigest string // Used for logging/progress only
		dgst        digest.Digest
		mt          string
		size        int64
		tagged      reference.NamedTagged
		isTagged    bool
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		dgst = digested.Digest()
		tagOrDigest = digested.String()
	} else if tagged, isTagged = ref.(reference.NamedTagged); isTagged {
		tagService := p.repo.Tags(ctx)
		desc, err := tagService.Get(ctx, tagged.Tag())
		if err != nil {
			return false, allowV1Fallback(err)
		}

		dgst = desc.Digest
		tagOrDigest = tagged.Tag()
		mt = desc.MediaType
		size = desc.Size
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	ctx = log.WithLogger(ctx, logrus.WithFields(
		logrus.Fields{
			"digest": dgst,
			"remote": ref,
		}))

	desc := specs.Descriptor{
		MediaType: mt,
		Digest:    dgst,
		Size:      size,
	}
	manifest, err := p.manifestStore.Get(ctx, desc)
	if err != nil {
		if isTagged && isNotFound(errors.Cause(err)) {
			logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag")

			msg := `%s Failed to pull manifest by the resolved digest. This registry does not
	appear to conform to the distribution registry specification; falling back to
	pull by tag.  This fallback is DEPRECATED, and will be removed in a future
	release.  Please contact admins of %s. %s
`

			warnEmoji := "\U000026A0\U0000FE0F"
			progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji)

			// Fetch by tag worked, but fetch by digest didn't.
			// This is a broken registry implementation.
			// We'll fall back to the old behavior and get the manifest by tag.
			var ms distribution.ManifestService
			ms, err = p.repo.Manifests(ctx)
			if err != nil {
				return false, err
			}

			manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag()))
			err = errors.Wrap(err, "error after falling back to get manifest by tag")
		}
		if err != nil {
			return false, err
		}
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass}
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}

		// Give registries time to upgrade to schema2, and only warn if we
		// know a registry was upgraded a long time ago.
		// TODO: condition to be removed
		if reference.Domain(ref) == "docker.io" {
			msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
			logrus.Warn(msg)
			progress.Message(p.config.ProgressOutput, "", msg)
		}

		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *ocischema.DeserializedManifest:
		id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}

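// pullSchema1 verifies a legacy schema1 manifest, deduplicates its layers,
// downloads the rootfs, and synthesizes a modern image configuration from the
// embedded v1 history.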
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		if err = blobSum.Validate(); err != nil {
			return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum)
		}

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	// The v1 manifest itself doesn't directly contain an OS. The history
	// does, but unfortunately only as a string, so search through all the
	// history entries until, hopefully, we find one which indicates the OS.
	// supertest2014/nyan is an example of a registry image with schemav1.
	configOS := runtime.GOOS
	if system.LCOWSupported() {
		type config struct {
			Os string `json:"os,omitempty"`
		}
		for _, v := range verifiedManifest.History {
			var c config
			if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil {
				if c.Os != "" {
					configOS = c.Os
					break
				}
			}
		}
	}

	// In the situation that the API call didn't specify an OS explicitly, but
	// we support the operating system, switch to that operating system.
	// eg FROM supertest2014/nyan with no platform specifier, and docker build
	// with no --platform= flag under LCOW.
	requestedOS := ""
	if platform != nil {
		requestedOS = platform.OS
	} else if system.IsOSSupported(configOS) {
		requestedOS = configOS
	}

	// Early bath if the requested OS doesn't match that of the configuration.
	// This avoids doing the download, only to potentially fail later.
	if !strings.EqualFold(configOS, requestedOS) {
		return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(ctx, config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}

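// pullSchema2Layers drives the common pull path for schema2 and OCI
// manifests: the image configuration and the layer blobs are fetched
// concurrently, the layer DiffIDs are checked against the configuration, and
// the assembled image is stored.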
func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) {
	if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range layers {
		if err := d.Digest.Validate(); err != nil {
			return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest)
		}
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte          // raw serialized image config
		downloadedRootFS *image.RootFS   // rootFS from registered layers
		configRootFS     *image.RootFS   // rootFS from configuration
		release          func()          // release resources from rootFS download
		configPlatform   *specs.Platform // for LCOW when registering downloaded layers
	)

	layerStoreOS := runtime.GOOS
	if platform != nil {
		layerStoreOS = platform.OS
	}

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download, slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err != nil {
			return "", err
		}
		if configRootFS == nil {
			return "", errRootFSInvalid
		}
		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
			return "", err
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}
		if platform == nil {
			// Early bath if the requested OS doesn't match that of the configuration.
			// This avoids doing the download, only to potentially fail later.
			if !system.IsOSSupported(configPlatform.OS) {
				return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
			}
			layerStoreOS = configPlatform.OS
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side-loaded
		for i := range descriptors {
			descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(ctx, configJSON)
	if err != nil {
		return "", err
	}

	return imageID, nil
}

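// pullSchema2 validates the manifest digest (when pulling by digest) and then
// pulls the configuration and layers the manifest references.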
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}
	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
	return id, manifestDigest, err
}

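// pullOCI is the OCI-manifest counterpart of pullSchema2; the two formats
// share the same layer-pulling logic.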
func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}
	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
	return id, manifestDigest, err
}

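// receiveConfig blocks until the configuration goroutine delivers either the
// raw config JSON or an error, then derives the rootfs and platform from the
// configuration.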
func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) {
	select {
	case configJSON := <-configChan:
		rootfs, err := s.RootFSFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		platform, err := s.PlatformFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		return configJSON, rootfs, platform, nil
	case err := <-errChan:
		return nil, nil, nil, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2ImageConfig.
	}
}

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var platform specs.Platform
	if pp != nil {
		platform = *pp
	}
	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH)

	manifestMatches := filterManifests(mfstList.Manifests, platform)

	if len(manifestMatches) == 0 {
		errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform))
		logrus.Debug(errMsg)
		return "", "", errors.New(errMsg)
	}

	if len(manifestMatches) > 1 {
		logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
	}
	match := manifestMatches[0]

	if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil {
		return "", "", err
	}

	desc := specs.Descriptor{
		Digest:    match.Digest,
		Size:      match.Size,
		MediaType: match.MediaType,
	}
	manifest, err := p.manifestStore.Get(ctx, desc)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
		logrus.Warn(msg)
		progress.Message(p.config.ProgressOutput, "", msg)

		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	case *ocischema.DeserializedManifest:
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullOCI(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}

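// pullSchema2Config fetches the image configuration blob and verifies that
// its contents match the requested digest.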
func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fall back to v1
// (even if confirmedV2 has been set already), and if so, wraps the error in
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
// error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

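// verifySchema1Manifest checks the manifest digest when pulling by digest and
// performs basic structural validation (schema version, layer/history
// parity) before the manifest contents are trusted.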
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
	}
	return m, nil
}

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}

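// createDownloadFile creates the temporary file that a layer blob is
// downloaded into.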
func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}

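// toOCIPlatform converts a manifest-list platform spec into the equivalent
// OCI image-spec Platform. For example:
//
//	toOCIPlatform(manifestlist.PlatformSpec{OS: "linux", Architecture: "arm64"})
//	// == specs.Platform{OS: "linux", Architecture: "arm64"}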
func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform {
	return specs.Platform{
		OS:           p.OS,
		Architecture: p.Architecture,
		Variant:      p.Variant,
		OSFeatures:   p.OSFeatures,
		OSVersion:    p.OSVersion,
	}
}