github.com/docker/docker@v299999999.0.0-20200612211812-aaf470eca7b5+incompatible/distribution/pull_v2.go

package distribution // import "github.com/docker/docker/distribution"

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"runtime"
	"strings"

	"github.com/containerd/containerd/platforms"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/ocischema"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	v1 "github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

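// Pull resolves a v2 repository for the given reference and pulls from it.
// Errors may be wrapped in a fallbackError to signal to the caller that
// falling back to the v1 protocol is permitted.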
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pullV2Repository(ctx, ref, platform); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err, p.endpoint.Mirror) {
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

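// pullV2Repository pulls the given reference if it carries a tag or digest,
// or every tag in the repository if it is a bare repository name, then
// writes the overall pull status to the progress output.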
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref, platform)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef, platform)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)

	return nil
}

type v2LayerDescriptor struct {
	digest            digest.Digest
	diffID            layer.DiffID
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

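// Key returns the key used to identify this layer in the download manager;
// the "v2:" prefix distinguishes it from keys of other transfer schemes.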
func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

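// DiffID returns the layer's DiffID, taken from the descriptor if already
// known, otherwise looked up in the V2 metadata service.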
func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	if ld.diffID != "" {
		return ld.diffID, nil
	}
	return ld.V2MetadataService.GetDiffID(ld.digest)
}

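// Download downloads the layer blob to a temporary file, resuming a partial
// download from a previous attempt when one exists, and verifies the content
// against the layer digest. On success it hands the temporary file off to
// the caller as a ReadCloser that removes the file when closed.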
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, io.SeekEnd)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, io.SeekEnd)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, io.SeekStart)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}

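// Close closes and removes the temporary download file, if one is still
// owned by this descriptor.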
func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

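// truncateDownloadFile empties the temporary download file and discards the
// digest verifier so the next attempt starts from scratch.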
func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

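// Registered is called by the download manager once the layer has been
// registered in the layer store.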
func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}

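// pullV2Tag fetches and validates the manifest for the given reference,
// dispatches to the pull path matching the manifest type, and records the
// result in the reference store. It reports whether the tag or digest was
// newly created or updated.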
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass}
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}

		// Give registries time to upgrade to schema2, and only warn if we
		// know a registry has been upgraded a long time ago.
		// TODO: condition to be removed
		if reference.Domain(ref) == "docker.io" {
			msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
			logrus.Warn(msg)
			progress.Message(p.config.ProgressOutput, "", msg)
		}

		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *ocischema.DeserializedManifest:
		id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}

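// pullSchema1 pulls an image using the legacy schema1 manifest format,
// reconstructing the image history and configuration from the manifest's
// V1Compatibility entries.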
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	// The v1 manifest itself doesn't directly contain an OS, but each history
	// entry does, embedded in a JSON string. Search through the history until
	// we find an entry that indicates the OS. supertest2014/nyan is an example
	// of a registry image with a schema1 manifest.
	configOS := runtime.GOOS
	if system.LCOWSupported() {
		type config struct {
			Os string `json:"os,omitempty"`
		}
		for _, v := range verifiedManifest.History {
			var c config
			if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil {
				if c.Os != "" {
					configOS = c.Os
					break
				}
			}
		}
	}

	// If the API call didn't specify an OS explicitly, but we support the
	// image's operating system, switch to that operating system.
	// e.g. FROM supertest2014/nyan with no platform specifier, and docker
	// build with no --platform= flag under LCOW.
	requestedOS := ""
	if platform != nil {
		requestedOS = platform.OS
	} else if system.IsOSSupported(configOS) {
		requestedOS = configOS
	}

	// Bail out early if the requested OS doesn't match that of the
	// configuration. This avoids doing the download, only to potentially
	// fail later.
	if !strings.EqualFold(configOS, requestedOS) {
		return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}

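// pullSchema2Layers downloads the image configuration and the layer blobs
// shared by schema2 and OCI manifests in parallel, then cross-checks the
// downloaded DiffIDs against the configuration's rootfs before storing the
// image.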
func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) {
	if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range layers {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte          // raw serialized image config
		downloadedRootFS *image.RootFS   // rootFS from registered layers
		configRootFS     *image.RootFS   // rootFS from configuration
		release          func()          // release resources from rootFS download
		configPlatform   *specs.Platform // for LCOW when registering downloaded layers
	)

	layerStoreOS := runtime.GOOS
	if platform != nil {
		layerStoreOS = platform.OS
	}

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download and slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err != nil {
			return "", err
		}
		if configRootFS == nil {
			return "", errRootFSInvalid
		}
		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
			return "", err
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}
		if platform == nil {
			// Bail out early if the requested OS doesn't match that of the
			// configuration. This avoids doing the download, only to
			// potentially fail later.
			if !system.IsOSSupported(configPlatform.OS) {
				return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
			}
			layerStoreOS = configPlatform.OS
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side-loaded
		for i := range descriptors {
			descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(configJSON)
	if err != nil {
		return "", err
	}

	return imageID, nil
}

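// pullSchema2 pulls an image described by a schema2 manifest and returns
// its image ID along with the verified manifest digest.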
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}
	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
	return id, manifestDigest, err
}

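// pullOCI pulls an image described by an OCI manifest; the layer and config
// handling is shared with the schema2 path.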
func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}
	id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
	return id, manifestDigest, err
}

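// receiveConfig blocks until the image configuration (or an error) arrives
// from the config-pulling goroutine, then extracts the rootfs and platform
// from it.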
func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) {
	select {
	case configJSON := <-configChan:
		rootfs, err := s.RootFSFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		platform, err := s.PlatformFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		return configJSON, rootfs, platform, nil
	case err := <-errChan:
		return nil, nil, nil, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in pullSchema2Config.
	}
}

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var platform specs.Platform
	if pp != nil {
		platform = *pp
	}
	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH)

	manifestMatches := filterManifests(mfstList.Manifests, platform)

	if len(manifestMatches) == 0 {
		errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform))
		logrus.Debug(errMsg)
		return "", "", errors.New(errMsg)
	}

	if len(manifestMatches) > 1 {
		logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
	}
	manifestDigest := manifestMatches[0].Digest

	if err := checkImageCompatibility(manifestMatches[0].Platform.OS, manifestMatches[0].Platform.OSVersion); err != nil {
		return "", "", err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
		logrus.Warn(msg)
		progress.Message(p.config.ProgressOutput, "", msg)

		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	case *ocischema.DeserializedManifest:
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullOCI(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}

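// pullSchema2Config fetches the image configuration blob identified by dgst
// and verifies it against that digest before returning the raw JSON.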
func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fall back to v1
// (even if confirmedV2 has been set already), and if so, wraps the error in
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
// error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

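// verifySchema1Manifest verifies the manifest digest when pulling by digest
// and performs basic structural checks, returning the embedded manifest on
// success.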
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
	}
	return m, nil
}

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check for general duplicates, returning an error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that repeat consecutively; those are handled later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}

func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}

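// toOCIPlatform converts a manifest-list platform specification to the
// equivalent OCI image-spec platform.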
func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform {
	return specs.Platform{
		OS:           p.OS,
		Architecture: p.Architecture,
		Variant:      p.Variant,
		OSFeatures:   p.OSFeatures,
		OSVersion:    p.OSVersion,
	}
}