github.com/zhuohuang-hust/src-cbuild@v0.0.0-20230105071821-c7aab3e7c840/distribution/pull_v2.go

package distribution

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"runtime"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"golang.org/x/net/context"
)

var (
	errRootFSMismatch  = errors.New("layers from manifest don't match image configuration")
	errMediaTypePlugin = errors.New("target is a plugin")
	errRootFSInvalid   = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

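// v2Puller pulls images and manifests from a registry endpoint that speaks
// the v2 protocol.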
type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

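// Pull establishes a v2 repository client for ref and pulls it. Errors that
// permit a retry against the v1 protocol are wrapped in fallbackError so
// the caller can fall back.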
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pullV2Repository(ctx, ref); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err) {
			logrus.Errorf("Error trying v2 registry: %v", err)
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

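// pullV2Repository pulls the single tag or digest named by ref, or every
// tag in the repository if ref carries only a name. It finishes by writing
// a status line reporting whether anything new was downloaded or tagged.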
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)

	return nil
}

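// v2LayerDescriptor describes a single layer blob to download from a v2
// registry. It carries the download state (temporary file and digest
// verifier) that survives across retries.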
type v2LayerDescriptor struct {
	digest            digest.Digest
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

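// Key returns the unique key used to identify this blob during transfers.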
func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

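// ID returns the truncated digest used to label this layer in progress output.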
func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

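// DiffID returns the DiffID previously recorded for this blob, if the
// metadata service knows it.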
func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.V2MetadataService.GetDiffID(ld.digest)
}

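// Download fetches the blob into a temporary file, resuming a previous
// partial download when one exists. The content is digest-verified as it
// is copied. On success it hands back a ReadCloser over the temporary file
// (which is removed on Close) together with the blob size.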
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier, err = digest.NewDigestVerifier(ld.digest)
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}

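// Close releases the temporary download file if the descriptor still owns it.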
func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

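// truncateDownloadFile discards any partially downloaded data and resets
// the digest verifier so the next attempt starts from a clean state.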
func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

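// Registered is called once the layer is in the layer store; it records the
// DiffID-to-digest mapping so future pulls can reuse it.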
func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
}

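// pullV2Tag fetches the manifest for a tag or digest, dispatches to the
// handler for its schema, and updates the reference store. The returned
// bool reports whether the pull changed anything locally.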
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		if m.Manifest.Config.MediaType == schema2.MediaTypePluginConfig ||
			m.Manifest.Config.MediaType == "application/vnd.docker.plugin.image.v0+json" { // TODO: remove this v0 before 1.13 GA
			return false, errMediaTypePlugin
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	oldTagID, err := p.config.ReferenceStore.Get(ref)
	if err == nil {
		if oldTagID == id {
			return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
		}
	} else if err != reference.ErrDoesNotExist {
		return false, err
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
			return false, err
		}
	} else {
		if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
			return false, err
		}
		if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
			return false, err
		}
	}
	return true, nil
}

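// pullSchema1 downloads the layers of a verified schema1 manifest, rebuilds
// the image configuration from its v1 compatibility history, and stores the
// image. It returns the image ID and the manifest digest.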
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID.Digest(), manifestDigest, nil
}

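// pullSchema2 pulls an image described by a schema2 manifest: the image
// configuration is fetched concurrently with the layer downloads, and the
// DiffIDs in the configuration must match the downloaded layers before the
// image is stored.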
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	if _, err := p.config.ImageStore.Get(image.IDFromDigest(target.Digest)); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, manifestDigest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.Layers {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	errChan := make(chan error, 1)
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			errChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON         []byte       // raw serialized image config
		unmarshalledConfig image.Image  // deserialized image config
		downloadRootFS     image.RootFS // rootFS to use for registering layers.
	)

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before attempting the download, effectively serialising
	// the config fetch and the layer downloads, which slows the pull down
	// slightly. We have to do it this way, as chances are the download of layers
	// itself would fail due to file names which aren't suitable for NTFS. At some
	// point in the future, if a similar check to block Windows images being
	// pulled on Linux is implemented, it may be necessary to perform the same
	// type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}

		if unmarshalledConfig.RootFS == nil {
			return "", "", errRootFSInvalid
		}

		if unmarshalledConfig.OS == "linux" {
			return "", "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
		}
	}

	downloadRootFS = *image.NewRootFS()

	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		if configJSON != nil {
			// Already received the config
			return "", "", err
		}
		select {
		case err = <-errChan:
			return "", "", err
		default:
			cancel()
			select {
			case <-configChan:
			case <-errChan:
			}
			return "", "", err
		}
	}
	defer release()

	if configJSON == nil {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}

		if unmarshalledConfig.RootFS == nil {
			return "", "", errRootFSInvalid
		}
	}

	// The DiffIDs returned in rootFS MUST match those in the config.
	// Otherwise the image config could be referencing layers that aren't
	// included in the manifest.
	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
		return "", "", errRootFSMismatch
	}

	for i := range rootFS.DiffIDs {
		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
			return "", "", errRootFSMismatch
		}
	}

	imageID, err := p.config.ImageStore.Create(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID.Digest(), manifestDigest, nil
}

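// receiveConfig blocks until the config-pulling goroutine in pullSchema2
// delivers either the raw configuration or an error, and unmarshals the
// configuration on success.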
func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
	select {
	case configJSON := <-configChan:
		var unmarshalledConfig image.Image
		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
			return nil, image.Image{}, err
		}
		return configJSON, unmarshalledConfig, nil
	case err := <-errChan:
		return nil, image.Image{}, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2Config.
	}
}

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var manifestDigest digest.Digest
	for _, manifestDescriptor := range mfstList.Manifests {
		// TODO(aaronl): The manifest list spec supports optional
		// "features" and "variant" fields. These are not yet used.
		// Once they are, their values should be interpreted here.
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
			manifestDigest = manifestDescriptor.Digest
			break
		}
	}

	if manifestDigest == "" {
		return "", "", errors.New("no supported platform found in manifest list")
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		id, _, err = p.pullSchema1(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		id, _, err = p.pullSchema2(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}

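// pullSchema2Config fetches the image configuration blob for dgst and
// verifies that the received bytes match the digest before returning them.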
func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return nil, err
	}
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return "", err
		}
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fall back to
// v1 (even if confirmedV2 has been set already), and if so, wraps the error
// in a fallbackError with confirmedV2 set to false. Otherwise, it returns
// the error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

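// verifySchema1Manifest verifies the manifest digest when pulling by
// digest, then checks the schema version and the layer/history structure,
// returning the embedded manifest on success.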
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return nil, err
		}
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
	}
	return m, nil
}

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check for general duplicates up front, so we return an error instead
	// of hitting a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip consecutively repeated IDs; those are deduplicated below
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID, expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}

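// createDownloadFile creates the temporary file that a layer blob is
// downloaded into.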
func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}