github.com/DaoCloud/dao@v0.0.0-20161212064103-c3dbfd13ee36/distribution/pull_v2.go

     1  package distribution
     2  
     3  import (
     4  	"encoding/json"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"net/url"
    10  	"os"
    11  	"runtime"
    12  
    13  	"github.com/Sirupsen/logrus"
    14  	"github.com/docker/distribution"
    15  	"github.com/docker/distribution/digest"
    16  	"github.com/docker/distribution/manifest/manifestlist"
    17  	"github.com/docker/distribution/manifest/schema1"
    18  	"github.com/docker/distribution/manifest/schema2"
    19  	"github.com/docker/distribution/registry/api/errcode"
    20  	"github.com/docker/distribution/registry/client/auth"
    21  	"github.com/docker/distribution/registry/client/transport"
    22  	"github.com/docker/docker/distribution/metadata"
    23  	"github.com/docker/docker/distribution/xfer"
    24  	"github.com/docker/docker/image"
    25  	"github.com/docker/docker/image/v1"
    26  	"github.com/docker/docker/layer"
    27  	"github.com/docker/docker/pkg/ioutils"
    28  	"github.com/docker/docker/pkg/progress"
    29  	"github.com/docker/docker/pkg/stringid"
    30  	"github.com/docker/docker/reference"
    31  	"github.com/docker/docker/registry"
    32  	"golang.org/x/net/context"
    33  )
    34  
    35  var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
    36  
    37  // ImageConfigPullError is an error pulling the image config blob
    38  // (only applies to schema2).
    39  type ImageConfigPullError struct {
    40  	Err error
    41  }
    42  
    43  // Error returns the error string for ImageConfigPullError.
    44  func (e ImageConfigPullError) Error() string {
    45  	return "error pulling image configuration: " + e.Err.Error()
    46  }
    47  
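        // v2Puller pulls images from a registry that speaks the v2 protocol.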
    48  type v2Puller struct {
    49  	V2MetadataService *metadata.V2MetadataService
    50  	endpoint          registry.APIEndpoint
    51  	config            *ImagePullConfig
    52  	repoInfo          *registry.RepositoryInfo
    53  	repo              distribution.Repository
    54  	// confirmedV2 is set to true if we confirm we're talking to a v2
    55  	// registry. This is used to limit fallbacks to the v1 protocol.
    56  	confirmedV2 bool
    57  }
    58  
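        // Pull resolves the v2 repository for the configured endpoint and pulls the
        // given reference from it. Errors that qualify for a retry over the v1
        // protocol are wrapped in a fallbackError.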
    59  func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
    60  	// TODO(tiborvass): was ReceiveTimeout
    61  	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
    62  	if err != nil {
    63  		logrus.Warnf("Error getting v2 registry: %v", err)
    64  		return err
    65  	}
    66  
    67  	if err = p.pullV2Repository(ctx, ref); err != nil {
    68  		if _, ok := err.(fallbackError); ok {
    69  			return err
    70  		}
    71  		if continueOnError(err) {
    72  			logrus.Errorf("Error trying v2 registry: %v", err)
    73  			return fallbackError{
    74  				err:         err,
    75  				confirmedV2: p.confirmedV2,
    76  				transportOK: true,
    77  			}
    78  		}
    79  	}
    80  	return err
    81  }
    82  
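        // pullV2Repository pulls the tag or digest named by ref; if ref is a
        // name-only reference, every tag in the repository is pulled instead. A
        // final status line is written indicating whether anything new was
        // downloaded or tagged.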
    83  func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
    84  	var layersDownloaded bool
    85  	if !reference.IsNameOnly(ref) {
    86  		layersDownloaded, err = p.pullV2Tag(ctx, ref)
    87  		if err != nil {
    88  			return err
    89  		}
    90  	} else {
    91  		tags, err := p.repo.Tags(ctx).All(ctx)
    92  		if err != nil {
    93  			// If this repository doesn't exist on V2, we should
    94  			// permit a fallback to V1.
    95  			return allowV1Fallback(err)
    96  		}
    97  
    98  		// The v2 registry knows about this repository, so we will not
    99  		// allow fallback to the v1 protocol even if we encounter an
   100  		// error later on.
   101  		p.confirmedV2 = true
   102  
   103  		for _, tag := range tags {
   104  			tagRef, err := reference.WithTag(ref, tag)
   105  			if err != nil {
   106  				return err
   107  			}
   108  			pulledNew, err := p.pullV2Tag(ctx, tagRef)
   109  			if err != nil {
   110  				// Since this is the pull-all-tags case, don't
   111  				// allow an error pulling a particular tag to
   112  				// make the whole pull fall back to v1.
   113  				if fallbackErr, ok := err.(fallbackError); ok {
   114  					return fallbackErr.err
   115  				}
   116  				return err
   117  			}
   118  			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
   119  			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
   120  			layersDownloaded = layersDownloaded || pulledNew
   121  		}
   122  	}
   123  
   124  	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)
   125  
   126  	return nil
   127  }
   128  
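        // v2LayerDescriptor describes a single layer blob to download from a v2
        // registry. It keeps the temporary file and digest verifier around so that
        // an interrupted download can be resumed.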
   129  type v2LayerDescriptor struct {
   130  	digest            digest.Digest
   131  	repoInfo          *registry.RepositoryInfo
   132  	repo              distribution.Repository
   133  	V2MetadataService *metadata.V2MetadataService
   134  	tmpFile           *os.File
   135  	verifier          digest.Verifier
   136  	src               distribution.Descriptor
   137  }
   138  
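        // Key returns a unique key for this blob, used by the download manager to
        // deduplicate transfers.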
   139  func (ld *v2LayerDescriptor) Key() string {
   140  	return "v2:" + ld.digest.String()
   141  }
   142  
   143  func (ld *v2LayerDescriptor) ID() string {
   144  	return stringid.TruncateID(ld.digest.String())
   145  }
   146  
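        // DiffID returns the DiffID recorded for this blob in the V2 metadata
        // service, if known.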
   147  func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
   148  	return ld.V2MetadataService.GetDiffID(ld.digest)
   149  }
   150  
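        // Download fetches the blob into a temporary file, resuming a partial
        // download when a previous attempt left data behind, verifies the digest,
        // and returns a ReadCloser over the verified data together with the blob
        // size.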
   151  func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
   152  	logrus.Debugf("pulling blob %q", ld.digest)
   153  
   154  	var (
   155  		err    error
   156  		offset int64
   157  	)
   158  
   159  	if ld.tmpFile == nil {
   160  		ld.tmpFile, err = createDownloadFile()
   161  		if err != nil {
   162  			return nil, 0, xfer.DoNotRetry{Err: err}
   163  		}
   164  	} else {
   165  		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
   166  		if err != nil {
   167  			logrus.Debugf("error seeking to end of download file: %v", err)
   168  			offset = 0
   169  
   170  			ld.tmpFile.Close()
   171  			if err := os.Remove(ld.tmpFile.Name()); err != nil {
   172  				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
   173  			}
   174  			ld.tmpFile, err = createDownloadFile()
   175  			if err != nil {
   176  				return nil, 0, xfer.DoNotRetry{Err: err}
   177  			}
   178  		} else if offset != 0 {
   179  			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
   180  		}
   181  	}
   182  
   183  	tmpFile := ld.tmpFile
   184  
   185  	layerDownload, err := ld.open(ctx)
   186  	if err != nil {
   187  		logrus.Errorf("Error initiating layer download: %v", err)
   188  		if err == distribution.ErrBlobUnknown {
   189  			return nil, 0, xfer.DoNotRetry{Err: err}
   190  		}
   191  		return nil, 0, retryOnError(err)
   192  	}
   193  
   194  	if offset != 0 {
   195  		_, err := layerDownload.Seek(offset, os.SEEK_SET)
   196  		if err != nil {
   197  			if err := ld.truncateDownloadFile(); err != nil {
   198  				return nil, 0, xfer.DoNotRetry{Err: err}
   199  			}
   200  			return nil, 0, err
   201  		}
   202  	}
   203  	size, err := layerDownload.Seek(0, os.SEEK_END)
   204  	if err != nil {
   205  		// Seek failed, perhaps because there was no Content-Length
   206  		// header. This shouldn't fail the download, because we can
   207  		// still continue without a progress bar.
   208  		size = 0
   209  	} else {
   210  		if size != 0 && offset > size {
   211  			logrus.Debug("Partial download is larger than full blob. Starting over")
   212  			offset = 0
   213  			if err := ld.truncateDownloadFile(); err != nil {
   214  				return nil, 0, xfer.DoNotRetry{Err: err}
   215  			}
   216  		}
   217  
   218  		// Restore the seek offset either at the beginning of the
   219  		// stream, or just after the last byte we have from previous
   220  		// attempts.
   221  		_, err = layerDownload.Seek(offset, os.SEEK_SET)
   222  		if err != nil {
   223  			return nil, 0, err
   224  		}
   225  	}
   226  
   227  	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
   228  	defer reader.Close()
   229  
   230  	if ld.verifier == nil {
   231  		ld.verifier, err = digest.NewDigestVerifier(ld.digest)
   232  		if err != nil {
   233  			return nil, 0, xfer.DoNotRetry{Err: err}
   234  		}
   235  	}
   236  
   237  	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
   238  	if err != nil {
   239  		if err == transport.ErrWrongCodeForByteRange {
   240  			if err := ld.truncateDownloadFile(); err != nil {
   241  				return nil, 0, xfer.DoNotRetry{Err: err}
   242  			}
   243  			return nil, 0, err
   244  		}
   245  		return nil, 0, retryOnError(err)
   246  	}
   247  
   248  	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")
   249  
   250  	if !ld.verifier.Verified() {
   251  		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
   252  		logrus.Error(err)
   253  
   254  		// Allow a retry if this digest verification error happened
   255  		// after a resumed download.
   256  		if offset != 0 {
   257  			if err := ld.truncateDownloadFile(); err != nil {
   258  				return nil, 0, xfer.DoNotRetry{Err: err}
   259  			}
   260  
   261  			return nil, 0, err
   262  		}
   263  		return nil, 0, xfer.DoNotRetry{Err: err}
   264  	}
   265  
   266  	progress.Update(progressOutput, ld.ID(), "Download complete")
   267  
   268  	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())
   269  
   270  	_, err = tmpFile.Seek(0, os.SEEK_SET)
   271  	if err != nil {
   272  		tmpFile.Close()
   273  		if err := os.Remove(tmpFile.Name()); err != nil {
   274  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   275  		}
   276  		ld.tmpFile = nil
   277  		ld.verifier = nil
   278  		return nil, 0, xfer.DoNotRetry{Err: err}
   279  	}
   280  
   281  	// hand off the temporary file to the download manager, so it will only
   282  	// be closed once
   283  	ld.tmpFile = nil
   284  
   285  	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
   286  		tmpFile.Close()
   287  		err := os.RemoveAll(tmpFile.Name())
   288  		if err != nil {
   289  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   290  		}
   291  		return err
   292  	}), size, nil
   293  }
   294  
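        // Close closes and removes the temporary download file, if one is still held.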
   295  func (ld *v2LayerDescriptor) Close() {
   296  	if ld.tmpFile != nil {
   297  		ld.tmpFile.Close()
   298  		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
   299  			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
   300  		}
   301  	}
   302  }
   303  
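        // truncateDownloadFile empties the temporary download file and discards the
        // digest verifier so the next attempt starts from the beginning of the blob.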
   304  func (ld *v2LayerDescriptor) truncateDownloadFile() error {
   305  	// Need a new hash context since we will be redoing the download
   306  	ld.verifier = nil
   307  
   308  	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
   309  		logrus.Errorf("error seeking to beginning of download file: %v", err)
   310  		return err
   311  	}
   312  
   313  	if err := ld.tmpFile.Truncate(0); err != nil {
   314  		logrus.Errorf("error truncating download file: %v", err)
   315  		return err
   316  	}
   317  
   318  	return nil
   319  }
   320  
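        // Registered is called after the downloaded layer has been registered with
        // the layer store.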
   321  func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
   322  	// Cache mapping from this layer's DiffID to the blobsum
   323  	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
   324  }
   325  
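        // pullV2Tag fetches the manifest for ref by tag or digest, pulls the image
        // it describes, and records the resulting image ID in the reference store.
        // It returns true if new content was pulled or an existing image was newly
        // tagged.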
   326  func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
   327  	manSvc, err := p.repo.Manifests(ctx)
   328  	if err != nil {
   329  		return false, err
   330  	}
   331  
   332  	var (
   333  		manifest    distribution.Manifest
   334  		tagOrDigest string // Used for logging/progress only
   335  	)
   336  	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
   337  		// NOTE: not using TagService.Get, since it uses HEAD requests
   338  		// against the manifests endpoint, which are not supported by
   339  		// all registry versions.
   340  		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
   341  		if err != nil {
   342  			return false, allowV1Fallback(err)
   343  		}
   344  		tagOrDigest = tagged.Tag()
   345  	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
   346  		manifest, err = manSvc.Get(ctx, digested.Digest())
   347  		if err != nil {
   348  			return false, err
   349  		}
   350  		tagOrDigest = digested.Digest().String()
   351  	} else {
   352  		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
   353  	}
   354  
   355  	if manifest == nil {
   356  		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
   357  	}
   358  
   359  	// If manSvc.Get succeeded, we can be confident that the registry on
   360  	// the other side speaks the v2 protocol.
   361  	p.confirmedV2 = true
   362  
   363  	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
   364  	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())
   365  
   366  	var (
   367  		imageID        image.ID
   368  		manifestDigest digest.Digest
   369  	)
   370  
   371  	switch v := manifest.(type) {
   372  	case *schema1.SignedManifest:
   373  		imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
   374  		if err != nil {
   375  			return false, err
   376  		}
   377  	case *schema2.DeserializedManifest:
   378  		imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
   379  		if err != nil {
   380  			return false, err
   381  		}
   382  	case *manifestlist.DeserializedManifestList:
   383  		imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
   384  		if err != nil {
   385  			return false, err
   386  		}
   387  	default:
   388  		return false, errors.New("unsupported manifest format")
   389  	}
   390  
   391  	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
   392  
   393  	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
   394  	if err == nil {
   395  		if oldTagImageID == imageID {
   396  			return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID)
   397  		}
   398  	} else if err != reference.ErrDoesNotExist {
   399  		return false, err
   400  	}
   401  
   402  	if canonical, ok := ref.(reference.Canonical); ok {
   403  		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
   404  			return false, err
   405  		}
   406  	} else {
   407  		if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil {
   408  			return false, err
   409  		}
   410  		if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
   411  			return false, err
   412  		}
   413  	}
   414  	return true, nil
   415  }
   416  
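        // pullSchema1 verifies the signed schema1 manifest, downloads the layers it
        // references, and creates the image from the configuration converted to the
        // current format, returning the image ID and the manifest digest.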
   417  func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
   418  	var verifiedManifest *schema1.Manifest
   419  	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
   420  	if err != nil {
   421  		return "", "", err
   422  	}
   423  
   424  	rootFS := image.NewRootFS()
   425  
   426  	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
   427  		return "", "", err
   428  	}
   429  
   430  	// remove duplicate layers and check parent chain validity
   431  	err = fixManifestLayers(verifiedManifest)
   432  	if err != nil {
   433  		return "", "", err
   434  	}
   435  
   436  	var descriptors []xfer.DownloadDescriptor
   437  
   438  	// Image history converted to the new format
   439  	var history []image.History
   440  
   441  	// Note that the order of this loop is in the direction of bottom-most
   442  	// to top-most, so that the downloads slice gets ordered correctly.
   443  	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
   444  		blobSum := verifiedManifest.FSLayers[i].BlobSum
   445  
   446  		var throwAway struct {
   447  			ThrowAway bool `json:"throwaway,omitempty"`
   448  		}
   449  		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
   450  			return "", "", err
   451  		}
   452  
   453  		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
   454  		if err != nil {
   455  			return "", "", err
   456  		}
   457  		history = append(history, h)
   458  
   459  		if throwAway.ThrowAway {
   460  			continue
   461  		}
   462  
   463  		layerDescriptor := &v2LayerDescriptor{
   464  			digest:            blobSum,
   465  			repoInfo:          p.repoInfo,
   466  			repo:              p.repo,
   467  			V2MetadataService: p.V2MetadataService,
   468  		}
   469  
   470  		descriptors = append(descriptors, layerDescriptor)
   471  	}
   472  
   473  	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
   474  	if err != nil {
   475  		return "", "", err
   476  	}
   477  	defer release()
   478  
   479  	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
   480  	if err != nil {
   481  		return "", "", err
   482  	}
   483  
   484  	imageID, err = p.config.ImageStore.Create(config)
   485  	if err != nil {
   486  		return "", "", err
   487  	}
   488  
   489  	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
   490  
   491  	return imageID, manifestDigest, nil
   492  }
   493  
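        // pullSchema2 pulls the image described by a schema2 manifest. If the target
        // image already exists locally, nothing is downloaded. Otherwise the config
        // blob is fetched concurrently with the layer downloads, the DiffIDs in the
        // config must match the downloaded layers, and the image is created from the
        // raw config JSON.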
   494  func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
   495  	manifestDigest, err = schema2ManifestDigest(ref, mfst)
   496  	if err != nil {
   497  		return "", "", err
   498  	}
   499  
   500  	target := mfst.Target()
   501  	imageID = image.ID(target.Digest)
   502  	if _, err := p.config.ImageStore.Get(imageID); err == nil {
   503  		// If the image already exists locally, no need to pull
   504  		// anything.
   505  		return imageID, manifestDigest, nil
   506  	}
   507  
   508  	var descriptors []xfer.DownloadDescriptor
   509  
   510  	// Note that the order of this loop is in the direction of bottom-most
   511  	// to top-most, so that the downloads slice gets ordered correctly.
   512  	for _, d := range mfst.Layers {
   513  		layerDescriptor := &v2LayerDescriptor{
   514  			digest:            d.Digest,
   515  			repo:              p.repo,
   516  			repoInfo:          p.repoInfo,
   517  			V2MetadataService: p.V2MetadataService,
   518  			src:               d,
   519  		}
   520  
   521  		descriptors = append(descriptors, layerDescriptor)
   522  	}
   523  
   524  	configChan := make(chan []byte, 1)
   525  	errChan := make(chan error, 1)
   526  	var cancel func()
   527  	ctx, cancel = context.WithCancel(ctx)
   528  
   529  	// Pull the image config
   530  	go func() {
   531  		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
   532  		if err != nil {
   533  			errChan <- ImageConfigPullError{Err: err}
   534  			cancel()
   535  			return
   536  		}
   537  		configChan <- configJSON
   538  	}()
   539  
   540  	var (
   541  		configJSON         []byte       // raw serialized image config
   542  		unmarshalledConfig image.Image  // deserialized image config
   543  		downloadRootFS     image.RootFS // rootFS to use for registering layers.
   544  	)
   545  	if runtime.GOOS == "windows" {
   546  		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
   547  		if err != nil {
   548  			return "", "", err
   549  		}
   550  		if unmarshalledConfig.RootFS == nil {
   551  			return "", "", errors.New("image config has no rootfs section")
   552  		}
   553  		downloadRootFS = *unmarshalledConfig.RootFS
   554  		downloadRootFS.DiffIDs = []layer.DiffID{}
   555  	} else {
   556  		downloadRootFS = *image.NewRootFS()
   557  	}
   558  
   559  	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
   560  	if err != nil {
   561  		if configJSON != nil {
   562  			// Already received the config
   563  			return "", "", err
   564  		}
   565  		select {
   566  		case err = <-errChan:
   567  			return "", "", err
   568  		default:
   569  			cancel()
   570  			select {
   571  			case <-configChan:
   572  			case <-errChan:
   573  			}
   574  			return "", "", err
   575  		}
   576  	}
   577  	defer release()
   578  
   579  	if configJSON == nil {
   580  		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
   581  		if err != nil {
   582  			return "", "", err
   583  		}
   584  	}
   585  
   586  	// The DiffIDs returned in rootFS MUST match those in the config.
   587  	// Otherwise the image config could be referencing layers that aren't
   588  	// included in the manifest.
   589  	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
   590  		return "", "", errRootFSMismatch
   591  	}
   592  
   593  	for i := range rootFS.DiffIDs {
   594  		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
   595  			return "", "", errRootFSMismatch
   596  		}
   597  	}
   598  
   599  	imageID, err = p.config.ImageStore.Create(configJSON)
   600  	if err != nil {
   601  		return "", "", err
   602  	}
   603  
   604  	return imageID, manifestDigest, nil
   605  }
   606  
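        // receiveConfig waits for the config-pulling goroutine to deliver either the
        // raw image config or an error, and unmarshals the config on success.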
   607  func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
   608  	select {
   609  	case configJSON := <-configChan:
   610  		var unmarshalledConfig image.Image
   611  		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
   612  			return nil, image.Image{}, err
   613  		}
   614  		return configJSON, unmarshalledConfig, nil
   615  	case err := <-errChan:
   616  		return nil, image.Image{}, err
   617  		// Don't need a case for ctx.Done in the select because cancellation
   618  		// will trigger an error in p.pullSchema2ImageConfig.
   619  	}
   620  }
   621  
   622  // pullManifestList handles "manifest lists" which point to various
   623  // platform-specific manifests.
   624  func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
   625  	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
   626  	if err != nil {
   627  		return "", "", err
   628  	}
   629  
   630  	var manifestDigest digest.Digest
   631  	for _, manifestDescriptor := range mfstList.Manifests {
   632  		// TODO(aaronl): The manifest list spec supports optional
   633  		// "features" and "variant" fields. These are not yet used.
   634  		// Once they are, their values should be interpreted here.
   635  		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
   636  			manifestDigest = manifestDescriptor.Digest
   637  			break
   638  		}
   639  	}
   640  
   641  	if manifestDigest == "" {
   642  		return "", "", errors.New("no supported platform found in manifest list")
   643  	}
   644  
   645  	manSvc, err := p.repo.Manifests(ctx)
   646  	if err != nil {
   647  		return "", "", err
   648  	}
   649  
   650  	manifest, err := manSvc.Get(ctx, manifestDigest)
   651  	if err != nil {
   652  		return "", "", err
   653  	}
   654  
   655  	manifestRef, err := reference.WithDigest(ref, manifestDigest)
   656  	if err != nil {
   657  		return "", "", err
   658  	}
   659  
   660  	switch v := manifest.(type) {
   661  	case *schema1.SignedManifest:
   662  		imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
   663  		if err != nil {
   664  			return "", "", err
   665  		}
   666  	case *schema2.DeserializedManifest:
   667  		imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
   668  		if err != nil {
   669  			return "", "", err
   670  		}
   671  	default:
   672  		return "", "", errors.New("unsupported manifest format")
   673  	}
   674  
   675  	return imageID, manifestListDigest, err
   676  }
   677  
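        // pullSchema2ImageConfig fetches the image configuration blob identified by
        // dgst and verifies that its contents match that digest.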
   678  func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
   679  	blobs := p.repo.Blobs(ctx)
   680  	configJSON, err = blobs.Get(ctx, dgst)
   681  	if err != nil {
   682  		return nil, err
   683  	}
   684  
   685  	// Verify image config digest
   686  	verifier, err := digest.NewDigestVerifier(dgst)
   687  	if err != nil {
   688  		return nil, err
   689  	}
   690  	if _, err := verifier.Write(configJSON); err != nil {
   691  		return nil, err
   692  	}
   693  	if !verifier.Verified() {
   694  		err := fmt.Errorf("image config verification failed for digest %s", dgst)
   695  		logrus.Error(err)
   696  		return nil, err
   697  	}
   698  
   699  	return configJSON, nil
   700  }
   701  
   702  // schema2ManifestDigest computes the manifest digest, and, if pulling by
   703  // digest, ensures that it matches the requested digest.
   704  func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
   705  	_, canonical, err := mfst.Payload()
   706  	if err != nil {
   707  		return "", err
   708  	}
   709  
   710  	// If pull by digest, then verify the manifest digest.
   711  	if digested, isDigested := ref.(reference.Canonical); isDigested {
   712  		verifier, err := digest.NewDigestVerifier(digested.Digest())
   713  		if err != nil {
   714  			return "", err
   715  		}
   716  		if _, err := verifier.Write(canonical); err != nil {
   717  			return "", err
   718  		}
   719  		if !verifier.Verified() {
   720  			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
   721  			logrus.Error(err)
   722  			return "", err
   723  		}
   724  		return digested.Digest(), nil
   725  	}
   726  
   727  	return digest.FromBytes(canonical), nil
   728  }
   729  
   730  // allowV1Fallback checks if the error is a possible reason to fall back to v1
   731  // (even if confirmedV2 has been set already), and if so, wraps the error in
   732  // a fallbackError with confirmedV2 set to false. Otherwise, it returns the
   733  // error unmodified.
   734  func allowV1Fallback(err error) error {
   735  	switch v := err.(type) {
   736  	case errcode.Errors:
   737  		if len(v) != 0 {
   738  			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
   739  				return fallbackError{
   740  					err:         err,
   741  					confirmedV2: false,
   742  					transportOK: true,
   743  				}
   744  			}
   745  		}
   746  	case errcode.Error:
   747  		if shouldV2Fallback(v) {
   748  			return fallbackError{
   749  				err:         err,
   750  				confirmedV2: false,
   751  				transportOK: true,
   752  			}
   753  		}
   754  	case *url.Error:
   755  		if v.Err == auth.ErrNoBasicAuthCredentials {
   756  			return fallbackError{err: err, confirmedV2: false}
   757  		}
   758  	}
   759  
   760  	return err
   761  }
   762  
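        // verifySchema1Manifest checks the signed schema1 manifest against the
        // requested digest (when pulling by digest) and performs basic structural
        // validation: supported schema version, history entries matching the layer
        // count, and at least one layer.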
   763  func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
   764  	// If pull by digest, then verify the manifest digest. NOTE: It is
   765  	// important to do this first, before any other content validation. If the
   766  	// digest cannot be verified, don't even bother with those other things.
   767  	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
   768  		verifier, err := digest.NewDigestVerifier(digested.Digest())
   769  		if err != nil {
   770  			return nil, err
   771  		}
   772  		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
   773  			return nil, err
   774  		}
   775  		if !verifier.Verified() {
   776  			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
   777  			logrus.Error(err)
   778  			return nil, err
   779  		}
   780  	}
   781  	m = &signedManifest.Manifest
   782  
   783  	if m.SchemaVersion != 1 {
   784  		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
   785  	}
   786  	if len(m.FSLayers) != len(m.History) {
   787  		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
   788  	}
   789  	if len(m.FSLayers) == 0 {
   790  		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
   791  	}
   792  	return m, nil
   793  }
   794  
   795  // fixManifestLayers removes repeated layers from the manifest and checks the
   796  // correctness of the parent chain.
   797  func fixManifestLayers(m *schema1.Manifest) error {
   798  	imgs := make([]*image.V1Image, len(m.FSLayers))
   799  	for i := range m.FSLayers {
   800  		img := &image.V1Image{}
   801  
   802  		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
   803  			return err
   804  		}
   805  
   806  		imgs[i] = img
   807  		if err := v1.ValidateID(img.ID); err != nil {
   808  			return err
   809  		}
   810  	}
   811  
   812  	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
   813  		// Windows base layer can point to a base layer parent that is not in manifest.
   814  		return errors.New("Invalid parent ID in the base layer of the image.")
   815  	}
   816  
   817  	// check for duplicate IDs anywhere in the chain so we return an error instead of deadlocking
   818  	idmap := make(map[string]struct{})
   819  
   820  	var lastID string
   821  	for _, img := range imgs {
   822  		// skip IDs that appear consecutively; those are handled later
   823  		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
   824  			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
   825  		}
   826  		lastID = img.ID
   827  		idmap[lastID] = struct{}{}
   828  	}
   829  
   830  	// iterate backwards so that the remaining indexes stay valid after removing items
   831  	for i := len(imgs) - 2; i >= 0; i-- {
   832  		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
   833  			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
   834  			m.History = append(m.History[:i], m.History[i+1:]...)
   835  		} else if imgs[i].Parent != imgs[i+1].ID {
   836  			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
   837  		}
   838  	}
   839  
   840  	return nil
   841  }
   842  
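        // createDownloadFile creates the temporary file that a layer blob is
        // downloaded into.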
   843  func createDownloadFile() (*os.File, error) {
   844  	return ioutil.TempFile("", "GetImageBlob")
   845  }