github.com/dinever/docker@v1.11.1/distribution/pull_v2.go

     1  package distribution
     2  
     3  import (
     4  	"encoding/json"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"net/url"
    10  	"os"
    11  	"runtime"
    12  
    13  	"github.com/Sirupsen/logrus"
    14  	"github.com/docker/distribution"
    15  	"github.com/docker/distribution/digest"
    16  	"github.com/docker/distribution/manifest/manifestlist"
    17  	"github.com/docker/distribution/manifest/schema1"
    18  	"github.com/docker/distribution/manifest/schema2"
    19  	"github.com/docker/distribution/registry/api/errcode"
    20  	"github.com/docker/distribution/registry/client/auth"
    21  	"github.com/docker/distribution/registry/client/transport"
    22  	"github.com/docker/docker/distribution/metadata"
    23  	"github.com/docker/docker/distribution/xfer"
    24  	"github.com/docker/docker/image"
    25  	"github.com/docker/docker/image/v1"
    26  	"github.com/docker/docker/layer"
    27  	"github.com/docker/docker/pkg/ioutils"
    28  	"github.com/docker/docker/pkg/progress"
    29  	"github.com/docker/docker/pkg/stringid"
    30  	"github.com/docker/docker/reference"
    31  	"github.com/docker/docker/registry"
    32  	"golang.org/x/net/context"
    33  )
    34  
    35  var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
    36  
    37  // ImageConfigPullError is an error pulling the image config blob
    38  // (only applies to schema2).
    39  type ImageConfigPullError struct {
    40  	Err error
    41  }
    42  
    43  // Error returns the error string for ImageConfigPullError.
    44  func (e ImageConfigPullError) Error() string {
    45  	return "error pulling image configuration: " + e.Err.Error()
    46  }
    47  
    48  type v2Puller struct {
    49  	V2MetadataService *metadata.V2MetadataService
    50  	endpoint          registry.APIEndpoint
    51  	config            *ImagePullConfig
    52  	repoInfo          *registry.RepositoryInfo
    53  	repo              distribution.Repository
    54  	// confirmedV2 is set to true if we confirm we're talking to a v2
    55  	// registry. This is used to limit fallbacks to the v1 protocol.
    56  	confirmedV2 bool
    57  }
    58  
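        // Pull initiates a pull of ref from the configured v2 endpoint. Errors that
        // qualify for a protocol fallback are wrapped in fallbackError so the caller
        // can retry the pull against a v1 registry.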
    59  func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
    60  	// TODO(tiborvass): was ReceiveTimeout
    61  	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
    62  	if err != nil {
    63  		logrus.Warnf("Error getting v2 registry: %v", err)
    64  		return err
    65  	}
    66  
    67  	if err = p.pullV2Repository(ctx, ref); err != nil {
    68  		if _, ok := err.(fallbackError); ok {
    69  			return err
    70  		}
    71  		if continueOnError(err) {
    72  			logrus.Errorf("Error trying v2 registry: %v", err)
    73  			return fallbackError{
    74  				err:         err,
    75  				confirmedV2: p.confirmedV2,
    76  				transportOK: true,
    77  			}
    78  		}
    79  	}
    80  	return err
    81  }
    82  
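        // pullV2Repository pulls a single tag or digest or, when ref carries only a
        // repository name, every tag in the repository, and then writes the final
        // pull status message.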
    83  func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
    84  	var layersDownloaded bool
    85  	if !reference.IsNameOnly(ref) {
    86  		layersDownloaded, err = p.pullV2Tag(ctx, ref)
    87  		if err != nil {
    88  			return err
    89  		}
    90  	} else {
    91  		tags, err := p.repo.Tags(ctx).All(ctx)
    92  		if err != nil {
    93  			// If this repository doesn't exist on V2, we should
    94  			// permit a fallback to V1.
    95  			return allowV1Fallback(err)
    96  		}
    97  
    98  		// The v2 registry knows about this repository, so we will not
    99  		// allow fallback to the v1 protocol even if we encounter an
   100  		// error later on.
   101  		p.confirmedV2 = true
   102  
   103  		for _, tag := range tags {
   104  			tagRef, err := reference.WithTag(ref, tag)
   105  			if err != nil {
   106  				return err
   107  			}
   108  			pulledNew, err := p.pullV2Tag(ctx, tagRef)
   109  			if err != nil {
   110  				// Since this is the pull-all-tags case, don't
   111  				// allow an error pulling a particular tag to
   112  				// make the whole pull fall back to v1.
   113  				if fallbackErr, ok := err.(fallbackError); ok {
   114  					return fallbackErr.err
   115  				}
   116  				return err
   117  			}
   118  			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
   119  			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
   120  			layersDownloaded = layersDownloaded || pulledNew
   121  		}
   122  	}
   123  
   124  	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)
   125  
   126  	return nil
   127  }
   128  
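        // v2LayerDescriptor describes a single layer blob to be fetched from a v2
        // registry. It implements the xfer.DownloadDescriptor interface used by the
        // download manager.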
   129  type v2LayerDescriptor struct {
   130  	digest            digest.Digest
   131  	repoInfo          *registry.RepositoryInfo
   132  	repo              distribution.Repository
   133  	V2MetadataService *metadata.V2MetadataService
   134  	tmpFile           *os.File
   135  	verifier          digest.Verifier
   136  }
   137  
   138  func (ld *v2LayerDescriptor) Key() string {
   139  	return "v2:" + ld.digest.String()
   140  }
   141  
   142  func (ld *v2LayerDescriptor) ID() string {
   143  	return stringid.TruncateID(ld.digest.String())
   144  }
   145  
   146  func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
   147  	return ld.V2MetadataService.GetDiffID(ld.digest)
   148  }
   149  
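        // Download fetches the layer blob from the registry, resuming from a partial
        // temporary file when one exists, and verifies the data against the expected
        // digest. On success it returns a ReadCloser over the temporary file that
        // removes the file when closed, along with the blob size.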
   150  func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
   151  	logrus.Debugf("pulling blob %q", ld.digest)
   152  
   153  	var (
   154  		err    error
   155  		offset int64
   156  	)
   157  
   158  	if ld.tmpFile == nil {
   159  		ld.tmpFile, err = createDownloadFile()
   160  		if err != nil {
   161  			return nil, 0, xfer.DoNotRetry{Err: err}
   162  		}
   163  	} else {
   164  		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
   165  		if err != nil {
   166  			logrus.Debugf("error seeking to end of download file: %v", err)
   167  			offset = 0
   168  
   169  			ld.tmpFile.Close()
   170  			if err := os.Remove(ld.tmpFile.Name()); err != nil {
   171  				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
   172  			}
   173  			ld.tmpFile, err = createDownloadFile()
   174  			if err != nil {
   175  				return nil, 0, xfer.DoNotRetry{Err: err}
   176  			}
   177  		} else if offset != 0 {
   178  			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
   179  		}
   180  	}
   181  
   182  	tmpFile := ld.tmpFile
   183  	blobs := ld.repo.Blobs(ctx)
   184  
   185  	layerDownload, err := blobs.Open(ctx, ld.digest)
   186  	if err != nil {
   187  		logrus.Errorf("Error initiating layer download: %v", err)
   188  		if err == distribution.ErrBlobUnknown {
   189  			return nil, 0, xfer.DoNotRetry{Err: err}
   190  		}
   191  		return nil, 0, retryOnError(err)
   192  	}
   193  
   194  	if offset != 0 {
   195  		_, err := layerDownload.Seek(offset, os.SEEK_SET)
   196  		if err != nil {
   197  			if err := ld.truncateDownloadFile(); err != nil {
   198  				return nil, 0, xfer.DoNotRetry{Err: err}
   199  			}
   200  			return nil, 0, err
   201  		}
   202  	}
   203  	size, err := layerDownload.Seek(0, os.SEEK_END)
   204  	if err != nil {
   205  		// Seek failed, perhaps because there was no Content-Length
   206  		// header. This shouldn't fail the download, because we can
   207  		// still continue without a progress bar.
   208  		size = 0
   209  	} else {
   210  		if size != 0 && offset > size {
   211  			logrus.Debugf("Partial download is larger than full blob. Starting over")
   212  			offset = 0
   213  			if err := ld.truncateDownloadFile(); err != nil {
   214  				return nil, 0, xfer.DoNotRetry{Err: err}
   215  			}
   216  		}
   217  
   218  		// Restore the seek offset either at the beginning of the
   219  		// stream, or just after the last byte we have from previous
   220  		// attempts.
   221  		_, err = layerDownload.Seek(offset, os.SEEK_SET)
   222  		if err != nil {
   223  			return nil, 0, err
   224  		}
   225  	}
   226  
   227  	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
   228  	defer reader.Close()
   229  
   230  	if ld.verifier == nil {
   231  		ld.verifier, err = digest.NewDigestVerifier(ld.digest)
   232  		if err != nil {
   233  			return nil, 0, xfer.DoNotRetry{Err: err}
   234  		}
   235  	}
   236  
   237  	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
   238  	if err != nil {
   239  		if err == transport.ErrWrongCodeForByteRange {
   240  			if err := ld.truncateDownloadFile(); err != nil {
   241  				return nil, 0, xfer.DoNotRetry{Err: err}
   242  			}
   243  			return nil, 0, err
   244  		}
   245  		return nil, 0, retryOnError(err)
   246  	}
   247  
   248  	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")
   249  
   250  	if !ld.verifier.Verified() {
   251  		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
   252  		logrus.Error(err)
   253  
   254  		// Allow a retry if this digest verification error happened
   255  		// after a resumed download.
   256  		if offset != 0 {
   257  			if err := ld.truncateDownloadFile(); err != nil {
   258  				return nil, 0, xfer.DoNotRetry{Err: err}
   259  			}
   260  
   261  			return nil, 0, err
   262  		}
   263  		return nil, 0, xfer.DoNotRetry{Err: err}
   264  	}
   265  
   266  	progress.Update(progressOutput, ld.ID(), "Download complete")
   267  
   268  	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())
   269  
   270  	_, err = tmpFile.Seek(0, os.SEEK_SET)
   271  	if err != nil {
   272  		tmpFile.Close()
   273  		if err := os.Remove(tmpFile.Name()); err != nil {
   274  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   275  		}
   276  		ld.tmpFile = nil
   277  		ld.verifier = nil
   278  		return nil, 0, xfer.DoNotRetry{Err: err}
   279  	}
   280  
   281  	// hand off the temporary file to the download manager, so it will only
   282  	// be closed once
   283  	ld.tmpFile = nil
   284  
   285  	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
   286  		tmpFile.Close()
   287  		err := os.RemoveAll(tmpFile.Name())
   288  		if err != nil {
   289  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   290  		}
   291  		return err
   292  	}), size, nil
   293  }
   294  
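        // Close releases any temporary download file left behind by an interrupted
        // download.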
   295  func (ld *v2LayerDescriptor) Close() {
   296  	if ld.tmpFile != nil {
   297  		ld.tmpFile.Close()
   298  		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
   299  			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
   300  		}
   301  	}
   302  }
   303  
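        // truncateDownloadFile resets the temporary file and discards the digest
        // verifier so the download can be restarted from the beginning.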
   304  func (ld *v2LayerDescriptor) truncateDownloadFile() error {
   305  	// Need a new hash context since we will be redoing the download
   306  	ld.verifier = nil
   307  
   308  	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
   309  		logrus.Errorf("error seeking to beginning of download file: %v", err)
   310  		return err
   311  	}
   312  
   313  	if err := ld.tmpFile.Truncate(0); err != nil {
   314  		logrus.Errorf("error truncating download file: %v", err)
   315  		return err
   316  	}
   317  
   318  	return nil
   319  }
   320  
   321  func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
   322  	// Cache mapping from this layer's DiffID to the blobsum
   323  	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
   324  }
   325  
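        // pullV2Tag fetches the manifest for a single tag or digest, dispatches to the
        // handler for its schema, and records the resulting image ID in the reference
        // store. It reports whether the local reference was updated.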
   326  func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
   327  	manSvc, err := p.repo.Manifests(ctx)
   328  	if err != nil {
   329  		return false, err
   330  	}
   331  
   332  	var (
   333  		manifest    distribution.Manifest
   334  		tagOrDigest string // Used for logging/progress only
   335  	)
   336  	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
   337  		// NOTE: not using TagService.Get, since it uses HEAD requests
   338  		// against the manifests endpoint, which are not supported by
   339  		// all registry versions.
   340  		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
   341  		if err != nil {
   342  			return false, allowV1Fallback(err)
   343  		}
   344  		tagOrDigest = tagged.Tag()
   345  	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
   346  		manifest, err = manSvc.Get(ctx, digested.Digest())
   347  		if err != nil {
   348  			return false, err
   349  		}
   350  		tagOrDigest = digested.Digest().String()
   351  	} else {
   352  		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
   353  	}
   354  
   355  	if manifest == nil {
   356  		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
   357  	}
   358  
   359  	// If manSvc.Get succeeded, we can be confident that the registry on
   360  	// the other side speaks the v2 protocol.
   361  	p.confirmedV2 = true
   362  
   363  	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
   364  	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())
   365  
   366  	var (
   367  		imageID        image.ID
   368  		manifestDigest digest.Digest
   369  	)
   370  
   371  	switch v := manifest.(type) {
   372  	case *schema1.SignedManifest:
   373  		imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
   374  		if err != nil {
   375  			return false, err
   376  		}
   377  	case *schema2.DeserializedManifest:
   378  		imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
   379  		if err != nil {
   380  			return false, err
   381  		}
   382  	case *manifestlist.DeserializedManifestList:
   383  		imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
   384  		if err != nil {
   385  			return false, err
   386  		}
   387  	default:
   388  		return false, errors.New("unsupported manifest format")
   389  	}
   390  
   391  	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
   392  
   393  	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
   394  	if err == nil {
   395  		if oldTagImageID == imageID {
   396  			return false, nil
   397  		}
   398  	} else if err != reference.ErrDoesNotExist {
   399  		return false, err
   400  	}
   401  
   402  	if canonical, ok := ref.(reference.Canonical); ok {
   403  		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
   404  			return false, err
   405  		}
   406  	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
   407  		return false, err
   408  	}
   409  
   410  	return true, nil
   411  }
   412  
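        // pullSchema1 pulls the layers referenced by a schema1 manifest, rebuilds the
        // image configuration from the embedded v1 compatibility history, and creates
        // the image in the image store.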
   413  func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
   414  	var verifiedManifest *schema1.Manifest
   415  	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
   416  	if err != nil {
   417  		return "", "", err
   418  	}
   419  
   420  	rootFS := image.NewRootFS()
   421  
   422  	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
   423  		return "", "", err
   424  	}
   425  
   426  	// remove duplicate layers and check parent chain validity
   427  	err = fixManifestLayers(verifiedManifest)
   428  	if err != nil {
   429  		return "", "", err
   430  	}
   431  
   432  	var descriptors []xfer.DownloadDescriptor
   433  
   434  	// Image history converted to the new format
   435  	var history []image.History
   436  
   437  	// Note that the order of this loop is in the direction of bottom-most
   438  	// to top-most, so that the downloads slice gets ordered correctly.
   439  	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
   440  		blobSum := verifiedManifest.FSLayers[i].BlobSum
   441  
   442  		var throwAway struct {
   443  			ThrowAway bool `json:"throwaway,omitempty"`
   444  		}
   445  		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
   446  			return "", "", err
   447  		}
   448  
   449  		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
   450  		if err != nil {
   451  			return "", "", err
   452  		}
   453  		history = append(history, h)
   454  
   455  		if throwAway.ThrowAway {
   456  			continue
   457  		}
   458  
   459  		layerDescriptor := &v2LayerDescriptor{
   460  			digest:            blobSum,
   461  			repoInfo:          p.repoInfo,
   462  			repo:              p.repo,
   463  			V2MetadataService: p.V2MetadataService,
   464  		}
   465  
   466  		descriptors = append(descriptors, layerDescriptor)
   467  	}
   468  
   469  	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
   470  	if err != nil {
   471  		return "", "", err
   472  	}
   473  	defer release()
   474  
   475  	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
   476  	if err != nil {
   477  		return "", "", err
   478  	}
   479  
   480  	imageID, err = p.config.ImageStore.Create(config)
   481  	if err != nil {
   482  		return "", "", err
   483  	}
   484  
   485  	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
   486  
   487  	return imageID, manifestDigest, nil
   488  }
   489  
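        // pullSchema2 pulls the image configuration and layers referenced by a schema2
        // manifest, fetching the configuration concurrently with the layer downloads,
        // and verifies that the downloaded layer DiffIDs match those listed in the
        // configuration before creating the image.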
   490  func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
   491  	manifestDigest, err = schema2ManifestDigest(ref, mfst)
   492  	if err != nil {
   493  		return "", "", err
   494  	}
   495  
   496  	target := mfst.Target()
   497  	imageID = image.ID(target.Digest)
   498  	if _, err := p.config.ImageStore.Get(imageID); err == nil {
   499  		// If the image already exists locally, no need to pull
   500  		// anything.
   501  		return imageID, manifestDigest, nil
   502  	}
   503  
   504  	configChan := make(chan []byte, 1)
   505  	errChan := make(chan error, 1)
   506  	var cancel func()
   507  	ctx, cancel = context.WithCancel(ctx)
   508  
   509  	// Pull the image config
   510  	go func() {
   511  		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
   512  		if err != nil {
   513  			errChan <- ImageConfigPullError{Err: err}
   514  			cancel()
   515  			return
   516  		}
   517  		configChan <- configJSON
   518  	}()
   519  
   520  	var descriptors []xfer.DownloadDescriptor
   521  
   522  	// Note that the order of this loop is in the direction of bottom-most
   523  	// to top-most, so that the downloads slice gets ordered correctly.
   524  	for _, d := range mfst.References() {
   525  		layerDescriptor := &v2LayerDescriptor{
   526  			digest:            d.Digest,
   527  			repo:              p.repo,
   528  			repoInfo:          p.repoInfo,
   529  			V2MetadataService: p.V2MetadataService,
   530  		}
   531  
   532  		descriptors = append(descriptors, layerDescriptor)
   533  	}
   534  
   535  	var (
   536  		configJSON         []byte       // raw serialized image config
   537  		unmarshalledConfig image.Image  // deserialized image config
   538  		downloadRootFS     image.RootFS // rootFS to use for registering layers.
   539  	)
   540  	if runtime.GOOS == "windows" {
   541  		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
   542  		if err != nil {
   543  			return "", "", err
   544  		}
   545  		if unmarshalledConfig.RootFS == nil {
   546  			return "", "", errors.New("image config has no rootfs section")
   547  		}
   548  		downloadRootFS = *unmarshalledConfig.RootFS
   549  		downloadRootFS.DiffIDs = []layer.DiffID{}
   550  	} else {
   551  		downloadRootFS = *image.NewRootFS()
   552  	}
   553  
   554  	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
   555  	if err != nil {
   556  		if configJSON != nil {
   557  			// Already received the config
   558  			return "", "", err
   559  		}
   560  		select {
   561  		case err = <-errChan:
   562  			return "", "", err
   563  		default:
   564  			cancel()
   565  			select {
   566  			case <-configChan:
   567  			case <-errChan:
   568  			}
   569  			return "", "", err
   570  		}
   571  	}
   572  	defer release()
   573  
   574  	if configJSON == nil {
   575  		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
   576  		if err != nil {
   577  			return "", "", err
   578  		}
   579  	}
   580  
   581  	// The DiffIDs returned in rootFS MUST match those in the config.
   582  	// Otherwise the image config could be referencing layers that aren't
   583  	// included in the manifest.
   584  	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
   585  		return "", "", errRootFSMismatch
   586  	}
   587  
   588  	for i := range rootFS.DiffIDs {
   589  		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
   590  			return "", "", errRootFSMismatch
   591  		}
   592  	}
   593  
   594  	imageID, err = p.config.ImageStore.Create(configJSON)
   595  	if err != nil {
   596  		return "", "", err
   597  	}
   598  
   599  	return imageID, manifestDigest, nil
   600  }
   601  
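        // receiveConfig waits for the image configuration (or an error) produced by the
        // goroutine started in pullSchema2 and unmarshals it into an image.Image.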
   602  func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
   603  	select {
   604  	case configJSON := <-configChan:
   605  		var unmarshalledConfig image.Image
   606  		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
   607  			return nil, image.Image{}, err
   608  		}
   609  		return configJSON, unmarshalledConfig, nil
   610  	case err := <-errChan:
   611  		return nil, image.Image{}, err
   612  		// Don't need a case for ctx.Done in the select because cancellation
   613  		// will trigger an error in p.pullSchema2ImageConfig.
   614  	}
   615  }
   616  
   617  // pullManifestList handles "manifest lists" which point to various
   618  // platform-specific manifests.
   619  func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
   620  	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
   621  	if err != nil {
   622  		return "", "", err
   623  	}
   624  
   625  	var manifestDigest digest.Digest
   626  	for _, manifestDescriptor := range mfstList.Manifests {
   627  		// TODO(aaronl): The manifest list spec supports optional
   628  		// "features" and "variant" fields. These are not yet used.
   629  		// Once they are, their values should be interpreted here.
   630  		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
   631  			manifestDigest = manifestDescriptor.Digest
   632  			break
   633  		}
   634  	}
   635  
   636  	if manifestDigest == "" {
   637  		return "", "", errors.New("no supported platform found in manifest list")
   638  	}
   639  
   640  	manSvc, err := p.repo.Manifests(ctx)
   641  	if err != nil {
   642  		return "", "", err
   643  	}
   644  
   645  	manifest, err := manSvc.Get(ctx, manifestDigest)
   646  	if err != nil {
   647  		return "", "", err
   648  	}
   649  
   650  	manifestRef, err := reference.WithDigest(ref, manifestDigest)
   651  	if err != nil {
   652  		return "", "", err
   653  	}
   654  
   655  	switch v := manifest.(type) {
   656  	case *schema1.SignedManifest:
   657  		imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
   658  		if err != nil {
   659  			return "", "", err
   660  		}
   661  	case *schema2.DeserializedManifest:
   662  		imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
   663  		if err != nil {
   664  			return "", "", err
   665  		}
   666  	default:
   667  		return "", "", errors.New("unsupported manifest format")
   668  	}
   669  
   670  	return imageID, manifestListDigest, err
   671  }
   672  
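        // pullSchema2ImageConfig fetches the image configuration blob and verifies it
        // against the digest given in the manifest.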
   673  func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
   674  	blobs := p.repo.Blobs(ctx)
   675  	configJSON, err = blobs.Get(ctx, dgst)
   676  	if err != nil {
   677  		return nil, err
   678  	}
   679  
   680  	// Verify image config digest
   681  	verifier, err := digest.NewDigestVerifier(dgst)
   682  	if err != nil {
   683  		return nil, err
   684  	}
   685  	if _, err := verifier.Write(configJSON); err != nil {
   686  		return nil, err
   687  	}
   688  	if !verifier.Verified() {
   689  		err := fmt.Errorf("image config verification failed for digest %s", dgst)
   690  		logrus.Error(err)
   691  		return nil, err
   692  	}
   693  
   694  	return configJSON, nil
   695  }
   696  
   697  // schema2ManifestDigest computes the manifest digest, and, if pulling by
   698  // digest, ensures that it matches the requested digest.
   699  func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
   700  	_, canonical, err := mfst.Payload()
   701  	if err != nil {
   702  		return "", err
   703  	}
   704  
   705  	// If pull by digest, then verify the manifest digest.
   706  	if digested, isDigested := ref.(reference.Canonical); isDigested {
   707  		verifier, err := digest.NewDigestVerifier(digested.Digest())
   708  		if err != nil {
   709  			return "", err
   710  		}
   711  		if _, err := verifier.Write(canonical); err != nil {
   712  			return "", err
   713  		}
   714  		if !verifier.Verified() {
   715  			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
   716  			logrus.Error(err)
   717  			return "", err
   718  		}
   719  		return digested.Digest(), nil
   720  	}
   721  
   722  	return digest.FromBytes(canonical), nil
   723  }
   724  
   725  // allowV1Fallback checks if the error is a possible reason to fall back to v1
   726  // (even if confirmedV2 has been set already), and if so, wraps the error in
   727  // a fallbackError with confirmedV2 set to false. Otherwise, it returns the
   728  // error unmodified.
   729  func allowV1Fallback(err error) error {
   730  	switch v := err.(type) {
   731  	case errcode.Errors:
   732  		if len(v) != 0 {
   733  			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
   734  				return fallbackError{
   735  					err:         err,
   736  					confirmedV2: false,
   737  					transportOK: true,
   738  				}
   739  			}
   740  		}
   741  	case errcode.Error:
   742  		if shouldV2Fallback(v) {
   743  			return fallbackError{
   744  				err:         err,
   745  				confirmedV2: false,
   746  				transportOK: true,
   747  			}
   748  		}
   749  	case *url.Error:
   750  		if v.Err == auth.ErrNoBasicAuthCredentials {
   751  			return fallbackError{err: err, confirmedV2: false}
   752  		}
   753  	}
   754  
   755  	return err
   756  }
   757  
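        // verifySchema1Manifest validates a schema1 manifest: when pulling by digest it
        // first checks the payload against that digest, then performs basic structural
        // checks on the schema version and the layer and history lists.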
   758  func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
   759  	// If pull by digest, then verify the manifest digest. NOTE: It is
   760  	// important to do this first, before any other content validation. If the
   761  	// digest cannot be verified, don't even bother with those other things.
   762  	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
   763  		verifier, err := digest.NewDigestVerifier(digested.Digest())
   764  		if err != nil {
   765  			return nil, err
   766  		}
   767  		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
   768  			return nil, err
   769  		}
   770  		if !verifier.Verified() {
   771  			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
   772  			logrus.Error(err)
   773  			return nil, err
   774  		}
   775  	}
   776  	m = &signedManifest.Manifest
   777  
   778  	if m.SchemaVersion != 1 {
   779  		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
   780  	}
   781  	if len(m.FSLayers) != len(m.History) {
   782  		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
   783  	}
   784  	if len(m.FSLayers) == 0 {
   785  		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
   786  	}
   787  	return m, nil
   788  }
   789  
   790  // fixManifestLayers removes repeated layers from the manifest and checks the
   791  // correctness of the parent chain.
   792  func fixManifestLayers(m *schema1.Manifest) error {
   793  	imgs := make([]*image.V1Image, len(m.FSLayers))
   794  	for i := range m.FSLayers {
   795  		img := &image.V1Image{}
   796  
   797  		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
   798  			return err
   799  		}
   800  
   801  		imgs[i] = img
   802  		if err := v1.ValidateID(img.ID); err != nil {
   803  			return err
   804  		}
   805  	}
   806  
   807  	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
   808  		// Windows base layer can point to a base layer parent that is not in the manifest.
   809  		return errors.New("Invalid parent ID in the base layer of the image.")
   810  	}
   811  
   812  	// check for duplicate IDs anywhere in the chain so we fail with an error instead of deadlocking
   813  	idmap := make(map[string]struct{})
   814  
   815  	var lastID string
   816  	for _, img := range imgs {
   817  		// skip IDs that repeat consecutively; those are handled later
   818  		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
   819  			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
   820  		}
   821  		lastID = img.ID
   822  		idmap[lastID] = struct{}{}
   823  	}
   824  
   825  	// iterate backwards so that removing items does not shift the remaining indexes
   826  	for i := len(imgs) - 2; i >= 0; i-- {
   827  		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
   828  			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
   829  			m.History = append(m.History[:i], m.History[i+1:]...)
   830  		} else if imgs[i].Parent != imgs[i+1].ID {
   831  			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
   832  		}
   833  	}
   834  
   835  	return nil
   836  }
   837  
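        // createDownloadFile creates the temporary file that layer downloads are
        // written to.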
   838  func createDownloadFile() (*os.File, error) {
   839  	return ioutil.TempFile("", "GetImageBlob")
   840  }