github.com/uriddle/docker@v0.0.0-20210926094723-4072e6aeb013/distribution/pull_v2.go

package distribution

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"runtime"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"golang.org/x/net/context"
)

var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")

type v2Puller struct {
	V2MetadataService *metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

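// Pull initiates a pull of ref from the endpoint's v2 registry, wrapping
// errors in a fallbackError whenever a fallback to the v1 protocol is still
// permitted.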
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return fallbackError{err: err, confirmedV2: p.confirmedV2}
	}

	if err = p.pullV2Repository(ctx, ref); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if registry.ContinueOnError(err) {
			logrus.Debugf("Error trying v2 registry: %v", err)
			return fallbackError{err: err, confirmedV2: p.confirmedV2}
		}
	}
	return err
}

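// pullV2Repository pulls the single tag or digest referenced by ref or, when
// ref names only a repository, every tag in that repository.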
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)

	return nil
}

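// v2LayerDescriptor describes a layer blob to be downloaded from a v2
// registry; it implements the xfer.DownloadDescriptor interface used by the
// download manager.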
type v2LayerDescriptor struct {
	digest            digest.Digest
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService *metadata.V2MetadataService
}

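// Key returns the key that identifies this blob to the download manager: the
// layer digest prefixed with "v2:".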
func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.V2MetadataService.GetDiffID(ld.digest)
}

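// Download fetches the blob from the registry, reporting progress and
// verifying the content against ld.digest while spooling it to a temporary
// file. It returns a reader over the verified contents along with the blob
// size (0 if the size is unknown).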
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	blobs := ld.repo.Blobs(ctx)

	layerDownload, err := blobs.Open(ctx, ld.digest)
	if err != nil {
		logrus.Debugf("Error opening layer blob: %v", err)
		if err == distribution.ErrBlobUnknown {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
		return nil, 0, retryOnError(err)
	}

	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		// Restore the seek offset to the beginning of the stream.
		_, err = layerDownload.Seek(0, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size, ld.ID(), "Downloading")
	defer reader.Close()

	verifier, err := digest.NewDigestVerifier(ld.digest)
	if err != nil {
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, verifier))
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}

		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}
	return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil
}

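// Registered is called once the downloaded layer has been registered in the
// layer store and its DiffID is known.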
func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
}

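// pullV2Tag resolves ref to a manifest, pulls the image it describes, and
// records the resulting image ID in the reference store. It reports whether
// anything new was downloaded or tagged. For example (illustrative references
// only), "example.com/app:1.0" resolves through the tag branch below, while
// "example.com/app@sha256:..." resolves through the digest branch.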
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		// NOTE: not using TagService.Get, since it uses HEAD requests
		// against the manifests endpoint, which are not supported by
		// all registry versions.
		manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())

	var (
		imageID        image.ID
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
	if err == nil {
		if oldTagImageID == imageID {
			return false, nil
		}
	} else if err != reference.ErrDoesNotExist {
		return false, err
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
			return false, err
		}
	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
		return false, err
	}

	return true, nil
}

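// pullSchema1 verifies a schema1 signed manifest, downloads the layers it
// lists from the bottom-most up, and creates the image from the converted v1
// configuration.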
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return "", "", err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that this loop iterates from the bottom-most layer to the
	// top-most layer, so that the descriptors slice is ordered correctly
	// for download.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err = p.config.ImageStore.Create(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}

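// pullSchema2 pulls the image config blob and the layers of a schema2
// manifest concurrently, then creates the image after checking that the
// downloaded layer DiffIDs match those recorded in the config.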
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	imageID = image.ID(target.Digest)
	if _, err := p.config.ImageStore.Get(imageID); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return imageID, manifestDigest, nil
	}

	configChan := make(chan []byte, 1)
	errChan := make(chan error, 1)
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
		if err != nil {
			errChan <- err
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var descriptors []xfer.DownloadDescriptor

	// Note that this loop iterates from the bottom-most layer to the
	// top-most layer, so that the descriptors slice is ordered correctly
	// for download.
	for _, d := range mfst.References() {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	var (
		configJSON         []byte       // raw serialized image config
		unmarshalledConfig image.Image  // deserialized image config
		downloadRootFS     image.RootFS // rootFS to use for registering layers.
	)
	if runtime.GOOS == "windows" {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
		if unmarshalledConfig.RootFS == nil {
			return "", "", errors.New("image config has no rootfs section")
		}
		downloadRootFS = *unmarshalledConfig.RootFS
		downloadRootFS.DiffIDs = []layer.DiffID{}
	} else {
		downloadRootFS = *image.NewRootFS()
	}

	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		if configJSON != nil {
			// Already received the config
			return "", "", err
		}
		select {
		case err = <-errChan:
			return "", "", err
		default:
			cancel()
			select {
			case <-configChan:
			case <-errChan:
			}
			return "", "", err
		}
	}
	defer release()

	if configJSON == nil {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
		// Guard against a config with no rootfs section, which would
		// otherwise cause a nil dereference below (the windows branch
		// above already performs this check).
		if unmarshalledConfig.RootFS == nil {
			return "", "", errors.New("image config has no rootfs section")
		}
	}

	// The DiffIDs returned in rootFS MUST match those in the config.
	// Otherwise the image config could be referencing layers that aren't
	// included in the manifest.
	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
		return "", "", errRootFSMismatch
	}

	for i := range rootFS.DiffIDs {
		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
			return "", "", errRootFSMismatch
		}
	}

	imageID, err = p.config.ImageStore.Create(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}

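// receiveConfig waits for either the raw config bytes or an error from the
// config-pulling goroutine, unmarshalling the config on success.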
func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
	select {
	case configJSON := <-configChan:
		var unmarshalledConfig image.Image
		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
			return nil, image.Image{}, err
		}
		return configJSON, unmarshalledConfig, nil
	case err := <-errChan:
		return nil, image.Image{}, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2ImageConfig.
	}
}

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var manifestDigest digest.Digest
	for _, manifestDescriptor := range mfstList.Manifests {
		// TODO(aaronl): The manifest list spec supports optional
		// "features" and "variant" fields. These are not yet used.
		// Once they are, their values should be interpreted here.
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
			manifestDigest = manifestDescriptor.Digest
			break
		}
	}

	if manifestDigest == "" {
		return "", "", errors.New("no supported platform found in manifest list")
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(ref, manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return imageID, manifestListDigest, err
}

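// pullSchema2ImageConfig fetches the image configuration blob for dgst and
// verifies that its content matches the digest.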
func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return nil, err
	}
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pulling by digest, verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return "", err
		}
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks whether the error is a possible reason to fall back
// to v1 (even if confirmedV2 has already been set), and if so, wraps the
// error in a fallbackError with confirmedV2 set to false. Otherwise, it
// returns the error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) {
				return fallbackError{err: err, confirmedV2: false}
			}
		}
	case errcode.Error:
		if registry.ShouldV2Fallback(v) {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

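// verifySchema1Manifest verifies the manifest digest when pulling by digest
// and performs basic structural validation of the schema1 manifest.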
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
	// If pulling by digest, verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If
	// the digest cannot be verified, don't even bother with those other
	// things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return nil, err
		}
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
	}
	return m, nil
}

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// The Windows base layer may point to a parent that is not in
		// the manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// Check for duplicate IDs anywhere in the chain, so that we return an
	// error instead of deadlocking later on.
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// Skip IDs that repeat consecutively; those are handled below.
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// Iterate backwards so that removing items does not invalidate the
	// remaining indexes.
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID; remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID: expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}