github.com/endocode/docker@v1.4.2-0.20160113120958-46eb4700391e/distribution/pull_v2.go

     1  package distribution
     2  
     3  import (
     4  	"encoding/json"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"os"
    10  	"runtime"
    11  
    12  	"github.com/Sirupsen/logrus"
    13  	"github.com/docker/distribution"
    14  	"github.com/docker/distribution/digest"
    15  	"github.com/docker/distribution/manifest/manifestlist"
    16  	"github.com/docker/distribution/manifest/schema1"
    17  	"github.com/docker/distribution/manifest/schema2"
    18  	"github.com/docker/distribution/registry/api/errcode"
    19  	"github.com/docker/distribution/registry/client"
    20  	"github.com/docker/docker/distribution/metadata"
    21  	"github.com/docker/docker/distribution/xfer"
    22  	"github.com/docker/docker/image"
    23  	"github.com/docker/docker/image/v1"
    24  	"github.com/docker/docker/layer"
    25  	"github.com/docker/docker/pkg/ioutils"
    26  	"github.com/docker/docker/pkg/progress"
    27  	"github.com/docker/docker/pkg/stringid"
    28  	"github.com/docker/docker/reference"
    29  	"github.com/docker/docker/registry"
    30  	"golang.org/x/net/context"
    31  )
    32  
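         // errRootFSMismatch is returned when the layers named in a schema2
         // manifest do not produce the rootfs DiffIDs recorded in the image
         // configuration.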
    33  var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
    34  
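         // v2Puller pulls images from a registry speaking the v2 protocol. A
         // minimal usage sketch (the concrete wiring is done elsewhere in this
         // package, e.g. by newPuller):
         //
         //	p := &v2Puller{blobSumService: bss, endpoint: ep, config: cfg, repoInfo: info}
         //	err := p.Pull(ctx, ref) // returns a fallbackError if a v1 retry is still allowed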
    35  type v2Puller struct {
    36  	blobSumService *metadata.BlobSumService
    37  	endpoint       registry.APIEndpoint
    38  	config         *ImagePullConfig
    39  	repoInfo       *registry.RepositoryInfo
    40  	repo           distribution.Repository
    41  	// confirmedV2 is set to true if we confirm we're talking to a v2
    42  	// registry. This is used to limit fallbacks to the v1 protocol.
    43  	confirmedV2 bool
    44  }
    45  
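         // Pull resolves ref against the configured v2 endpoint and pulls the
         // referenced content, wrapping errors in fallbackError whenever falling
         // back to the v1 protocol is still permissible.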
    46  func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
    47  	// TODO(tiborvass): was ReceiveTimeout
    48  	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
    49  	if err != nil {
    50  		logrus.Warnf("Error getting v2 registry: %v", err)
    51  		return fallbackError{err: err, confirmedV2: p.confirmedV2}
    52  	}
    53  
    54  	if err = p.pullV2Repository(ctx, ref); err != nil {
    55  		if _, ok := err.(fallbackError); ok {
    56  			return err
    57  		}
    58  		if registry.ContinueOnError(err) {
    59  			logrus.Debugf("Error trying v2 registry: %v", err)
    60  			return fallbackError{err: err, confirmedV2: p.confirmedV2}
    61  		}
    62  	}
    63  	return err
    64  }
    65  
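         // pullV2Repository pulls the single tag or digest named by ref or, when
         // ref carries a repository name only, every tag in that repository, and
         // then writes a final status line describing whether anything changed.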
    66  func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
    67  	var layersDownloaded bool
    68  	if !reference.IsNameOnly(ref) {
    69  		layersDownloaded, err = p.pullV2Tag(ctx, ref)
    70  		if err != nil {
    71  			return err
    72  		}
    73  	} else {
    74  		tags, err := p.repo.Tags(ctx).All(ctx)
    75  		if err != nil {
    76  			// If this repository doesn't exist on V2, we should
    77  			// permit a fallback to V1.
    78  			return allowV1Fallback(err)
    79  		}
    80  
    81  		// The v2 registry knows about this repository, so we will not
    82  		// allow fallback to the v1 protocol even if we encounter an
    83  		// error later on.
    84  		p.confirmedV2 = true
    85  
    86  		for _, tag := range tags {
    87  			tagRef, err := reference.WithTag(ref, tag)
    88  			if err != nil {
    89  				return err
    90  			}
    91  			pulledNew, err := p.pullV2Tag(ctx, tagRef)
    92  			if err != nil {
    93  				// Since this is the pull-all-tags case, don't
    94  				// allow an error pulling a particular tag to
    95  				// make the whole pull fall back to v1.
    96  				if fallbackErr, ok := err.(fallbackError); ok {
    97  					return fallbackErr.err
    98  				}
    99  				return err
   100  			}
   101  			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
    102  			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
   103  			layersDownloaded = layersDownloaded || pulledNew
   104  		}
   105  	}
   106  
   107  	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)
   108  
   109  	return nil
   110  }
   111  
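         // v2LayerDescriptor describes a single layer blob to be downloaded from
         // a v2 registry; it implements the xfer.DownloadDescriptor interface
         // consumed by the download manager.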
   112  type v2LayerDescriptor struct {
   113  	digest         digest.Digest
   114  	repo           distribution.Repository
   115  	blobSumService *metadata.BlobSumService
   116  }
   117  
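         // Key returns the key that identifies this layer blob to the download
         // manager.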
   118  func (ld *v2LayerDescriptor) Key() string {
   119  	return "v2:" + ld.digest.String()
   120  }
   121  
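         // ID returns a short identifier for this layer, used in progress output.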
   122  func (ld *v2LayerDescriptor) ID() string {
   123  	return stringid.TruncateID(ld.digest.String())
   124  }
   125  
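         // DiffID returns the DiffID cached for this layer's blobsum, if any.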
   126  func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
   127  	return ld.blobSumService.GetDiffID(ld.digest)
   128  }
   129  
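         // Download fetches the layer blob from the registry into a temporary
         // file, verifying it against ld.digest and reporting progress. It
         // returns a ReadCloser over the verified content along with its size.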
   130  func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
   131  	logrus.Debugf("pulling blob %q", ld.digest)
   132  
   133  	blobs := ld.repo.Blobs(ctx)
   134  
   135  	layerDownload, err := blobs.Open(ctx, ld.digest)
   136  	if err != nil {
    137  		logrus.Debugf("Error opening layer blob: %v", err)
   138  		if err == distribution.ErrBlobUnknown {
   139  			return nil, 0, xfer.DoNotRetry{Err: err}
   140  		}
   141  		return nil, 0, retryOnError(err)
   142  	}
   143  
   144  	size, err := layerDownload.Seek(0, os.SEEK_END)
   145  	if err != nil {
   146  		// Seek failed, perhaps because there was no Content-Length
   147  		// header. This shouldn't fail the download, because we can
   148  		// still continue without a progress bar.
   149  		size = 0
   150  	} else {
    151  		// Restore the seek offset to the beginning of the stream.
   152  		_, err = layerDownload.Seek(0, os.SEEK_SET)
   153  		if err != nil {
   154  			return nil, 0, err
   155  		}
   156  	}
   157  
   158  	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size, ld.ID(), "Downloading")
   159  	defer reader.Close()
   160  
   161  	verifier, err := digest.NewDigestVerifier(ld.digest)
   162  	if err != nil {
   163  		return nil, 0, xfer.DoNotRetry{Err: err}
   164  	}
   165  
   166  	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
   167  	if err != nil {
   168  		return nil, 0, xfer.DoNotRetry{Err: err}
   169  	}
   170  
   171  	_, err = io.Copy(tmpFile, io.TeeReader(reader, verifier))
   172  	if err != nil {
   173  		return nil, 0, retryOnError(err)
   174  	}
   175  
   176  	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")
   177  
   178  	if !verifier.Verified() {
   179  		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
   180  		logrus.Error(err)
   181  		tmpFile.Close()
   182  		if err := os.RemoveAll(tmpFile.Name()); err != nil {
   183  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   184  		}
   185  
   186  		return nil, 0, xfer.DoNotRetry{Err: err}
   187  	}
   188  
   189  	progress.Update(progressOutput, ld.ID(), "Download complete")
   190  
   191  	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())
   192  
   193  	tmpFile.Seek(0, 0)
   194  	return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil
   195  }
   196  
   197  func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
   198  	// Cache mapping from this layer's DiffID to the blobsum
   199  	ld.blobSumService.Add(diffID, ld.digest)
   200  }
   201  
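         // pullV2Tag fetches the manifest for the tag or digest named by ref,
         // dispatches to the schema-specific pull routine, and records the
         // resulting image in the reference store. tagUpdated reports whether
         // anything new was pulled or tagged.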
   202  func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
   203  	manSvc, err := p.repo.Manifests(ctx)
   204  	if err != nil {
   205  		return false, err
   206  	}
   207  
   208  	var (
   209  		manifest    distribution.Manifest
   210  		tagOrDigest string // Used for logging/progress only
   211  	)
   212  	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
   213  		// NOTE: not using TagService.Get, since it uses HEAD requests
   214  		// against the manifests endpoint, which are not supported by
   215  		// all registry versions.
   216  		manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag()))
   217  		if err != nil {
   218  			return false, allowV1Fallback(err)
   219  		}
   220  		tagOrDigest = tagged.Tag()
   221  	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
   222  		manifest, err = manSvc.Get(ctx, digested.Digest())
   223  		if err != nil {
   224  			return false, err
   225  		}
   226  		tagOrDigest = digested.Digest().String()
   227  	} else {
   228  		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
   229  	}
   230  
   231  	if manifest == nil {
   232  		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
   233  	}
   234  
   235  	// If manSvc.Get succeeded, we can be confident that the registry on
   236  	// the other side speaks the v2 protocol.
   237  	p.confirmedV2 = true
   238  
   239  	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
   240  	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())
   241  
   242  	var (
   243  		imageID        image.ID
   244  		manifestDigest digest.Digest
   245  	)
   246  
   247  	switch v := manifest.(type) {
   248  	case *schema1.SignedManifest:
   249  		imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
   250  		if err != nil {
   251  			return false, err
   252  		}
   253  	case *schema2.DeserializedManifest:
   254  		imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
   255  		if err != nil {
   256  			return false, err
   257  		}
   258  	case *manifestlist.DeserializedManifestList:
   259  		imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
   260  		if err != nil {
   261  			return false, err
   262  		}
   263  	default:
   264  		return false, errors.New("unsupported manifest format")
   265  	}
   266  
   267  	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
   268  
   269  	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
   270  	if err == nil {
   271  		if oldTagImageID == imageID {
   272  			return false, nil
   273  		}
   274  	} else if err != reference.ErrDoesNotExist {
   275  		return false, err
   276  	}
   277  
   278  	if canonical, ok := ref.(reference.Canonical); ok {
   279  		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
   280  			return false, err
   281  		}
   282  	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
   283  		return false, err
   284  	}
   285  
   286  	return true, nil
   287  }
   288  
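         // pullSchema1 pulls an image described by a schema1 signed manifest: it
         // verifies and normalizes the manifest, downloads the referenced layers
         // from the bottom up, and creates the image from the converted v1
         // configuration and history.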
   289  func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
   290  	var verifiedManifest *schema1.Manifest
   291  	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
   292  	if err != nil {
   293  		return "", "", err
   294  	}
   295  
   296  	rootFS := image.NewRootFS()
   297  
   298  	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
   299  		return "", "", err
   300  	}
   301  
   302  	// remove duplicate layers and check parent chain validity
   303  	err = fixManifestLayers(verifiedManifest)
   304  	if err != nil {
   305  		return "", "", err
   306  	}
   307  
   308  	var descriptors []xfer.DownloadDescriptor
   309  
   310  	// Image history converted to the new format
   311  	var history []image.History
   312  
    313  	// Note that this loop iterates from the bottom-most layer to the
    314  	// top-most, so that the downloads slice ends up in the correct order.
   315  	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
   316  		blobSum := verifiedManifest.FSLayers[i].BlobSum
   317  
   318  		var throwAway struct {
   319  			ThrowAway bool `json:"throwaway,omitempty"`
   320  		}
   321  		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
   322  			return "", "", err
   323  		}
   324  
   325  		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
   326  		if err != nil {
   327  			return "", "", err
   328  		}
   329  		history = append(history, h)
   330  
   331  		if throwAway.ThrowAway {
   332  			continue
   333  		}
   334  
   335  		layerDescriptor := &v2LayerDescriptor{
   336  			digest:         blobSum,
   337  			repo:           p.repo,
   338  			blobSumService: p.blobSumService,
   339  		}
   340  
   341  		descriptors = append(descriptors, layerDescriptor)
   342  	}
   343  
   344  	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
   345  	if err != nil {
   346  		return "", "", err
   347  	}
   348  	defer release()
   349  
   350  	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
   351  	if err != nil {
   352  		return "", "", err
   353  	}
   354  
   355  	imageID, err = p.config.ImageStore.Create(config)
   356  	if err != nil {
   357  		return "", "", err
   358  	}
   359  
   360  	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
   361  
   362  	return imageID, manifestDigest, nil
   363  }
   364  
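         // pullSchema2 pulls an image described by a schema2 manifest. The image
         // configuration blob is fetched concurrently with the layer downloads,
         // and the downloaded rootfs must match the DiffIDs recorded in that
         // configuration before the image is created.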
   365  func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
   366  	manifestDigest, err = schema2ManifestDigest(ref, mfst)
   367  	if err != nil {
   368  		return "", "", err
   369  	}
   370  
   371  	target := mfst.Target()
   372  	imageID = image.ID(target.Digest)
   373  	if _, err := p.config.ImageStore.Get(imageID); err == nil {
   374  		// If the image already exists locally, no need to pull
   375  		// anything.
   376  		return imageID, manifestDigest, nil
   377  	}
   378  
   379  	configChan := make(chan []byte, 1)
   380  	errChan := make(chan error, 1)
   381  	var cancel func()
   382  	ctx, cancel = context.WithCancel(ctx)
   383  
   384  	// Pull the image config
   385  	go func() {
   386  		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
   387  		if err != nil {
   388  			errChan <- err
   389  			cancel()
   390  			return
   391  		}
   392  		configChan <- configJSON
   393  	}()
   394  
   395  	var descriptors []xfer.DownloadDescriptor
   396  
    397  	// Note that this loop iterates from the bottom-most layer to the
    398  	// top-most, so that the downloads slice ends up in the correct order.
   399  	for _, d := range mfst.References() {
   400  		layerDescriptor := &v2LayerDescriptor{
   401  			digest:         d.Digest,
   402  			repo:           p.repo,
   403  			blobSumService: p.blobSumService,
   404  		}
   405  
   406  		descriptors = append(descriptors, layerDescriptor)
   407  	}
   408  
   409  	var (
   410  		configJSON         []byte       // raw serialized image config
   411  		unmarshalledConfig image.Image  // deserialized image config
   412  		downloadRootFS     image.RootFS // rootFS to use for registering layers.
   413  	)
   414  	if runtime.GOOS == "windows" {
   415  		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
   416  		if err != nil {
   417  			return "", "", err
   418  		}
   419  		if unmarshalledConfig.RootFS == nil {
   420  			return "", "", errors.New("image config has no rootfs section")
   421  		}
   422  		downloadRootFS = *unmarshalledConfig.RootFS
   423  		downloadRootFS.DiffIDs = []layer.DiffID{}
   424  	} else {
   425  		downloadRootFS = *image.NewRootFS()
   426  	}
   427  
   428  	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
   429  	if err != nil {
   430  		if configJSON != nil {
   431  			// Already received the config
   432  			return "", "", err
   433  		}
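         		// The download failed before the config arrived. Prefer an error
         		// already delivered by the config goroutine; otherwise cancel the
         		// context and drain whichever channel fires so the goroutine can
         		// exit, then return the download error.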
   434  		select {
   435  		case err = <-errChan:
   436  			return "", "", err
   437  		default:
   438  			cancel()
   439  			select {
   440  			case <-configChan:
   441  			case <-errChan:
   442  			}
   443  			return "", "", err
   444  		}
   445  	}
   446  	defer release()
   447  
   448  	if configJSON == nil {
   449  		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
   450  		if err != nil {
   451  			return "", "", err
   452  		}
   453  	}
   454  
   455  	// The DiffIDs returned in rootFS MUST match those in the config.
   456  	// Otherwise the image config could be referencing layers that aren't
   457  	// included in the manifest.
   458  	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
   459  		return "", "", errRootFSMismatch
   460  	}
   461  
   462  	for i := range rootFS.DiffIDs {
   463  		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
   464  			return "", "", errRootFSMismatch
   465  		}
   466  	}
   467  
   468  	imageID, err = p.config.ImageStore.Create(configJSON)
   469  	if err != nil {
   470  		return "", "", err
   471  	}
   472  
   473  	return imageID, manifestDigest, nil
   474  }
   475  
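         // receiveConfig waits for the config-pulling goroutine to deliver either
         // the raw configuration bytes or an error, and unmarshals the bytes into
         // an image.Image.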
   476  func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
   477  	select {
   478  	case configJSON := <-configChan:
   479  		var unmarshalledConfig image.Image
   480  		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
   481  			return nil, image.Image{}, err
   482  		}
   483  		return configJSON, unmarshalledConfig, nil
   484  	case err := <-errChan:
   485  		return nil, image.Image{}, err
   486  		// Don't need a case for ctx.Done in the select because cancellation
   487  		// will trigger an error in p.pullSchema2ImageConfig.
   488  	}
   489  }
   490  
   491  // pullManifestList handles "manifest lists" which point to various
    492  // platform-specific manifests.
   493  func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
   494  	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
   495  	if err != nil {
   496  		return "", "", err
   497  	}
   498  
   499  	var manifestDigest digest.Digest
   500  	for _, manifestDescriptor := range mfstList.Manifests {
   501  		// TODO(aaronl): The manifest list spec supports optional
   502  		// "features" and "variant" fields. These are not yet used.
   503  		// Once they are, their values should be interpreted here.
   504  		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
   505  			manifestDigest = manifestDescriptor.Digest
   506  			break
   507  		}
   508  	}
   509  
   510  	if manifestDigest == "" {
   511  		return "", "", errors.New("no supported platform found in manifest list")
   512  	}
   513  
   514  	manSvc, err := p.repo.Manifests(ctx)
   515  	if err != nil {
   516  		return "", "", err
   517  	}
   518  
   519  	manifest, err := manSvc.Get(ctx, manifestDigest)
   520  	if err != nil {
   521  		return "", "", err
   522  	}
   523  
   524  	manifestRef, err := reference.WithDigest(ref, manifestDigest)
   525  	if err != nil {
   526  		return "", "", err
   527  	}
   528  
   529  	switch v := manifest.(type) {
   530  	case *schema1.SignedManifest:
   531  		imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
   532  		if err != nil {
   533  			return "", "", err
   534  		}
   535  	case *schema2.DeserializedManifest:
   536  		imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
   537  		if err != nil {
   538  			return "", "", err
   539  		}
   540  	default:
   541  		return "", "", errors.New("unsupported manifest format")
   542  	}
   543  
   544  	return imageID, manifestListDigest, err
   545  }
   546  
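         // pullSchema2ImageConfig fetches the image configuration blob for dgst
         // from the repository's blob store and verifies that its content matches
         // the digest.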
   547  func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
   548  	blobs := p.repo.Blobs(ctx)
   549  	configJSON, err = blobs.Get(ctx, dgst)
   550  	if err != nil {
   551  		return nil, err
   552  	}
   553  
   554  	// Verify image config digest
   555  	verifier, err := digest.NewDigestVerifier(dgst)
   556  	if err != nil {
   557  		return nil, err
   558  	}
   559  	if _, err := verifier.Write(configJSON); err != nil {
   560  		return nil, err
   561  	}
   562  	if !verifier.Verified() {
   563  		err := fmt.Errorf("image config verification failed for digest %s", dgst)
   564  		logrus.Error(err)
   565  		return nil, err
   566  	}
   567  
   568  	return configJSON, nil
   569  }
   570  
   571  // schema2ManifestDigest computes the manifest digest, and, if pulling by
   572  // digest, ensures that it matches the requested digest.
   573  func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
   574  	_, canonical, err := mfst.Payload()
   575  	if err != nil {
   576  		return "", err
   577  	}
   578  
    579  	// If pulling by digest, verify the manifest digest.
   580  	if digested, isDigested := ref.(reference.Canonical); isDigested {
   581  		verifier, err := digest.NewDigestVerifier(digested.Digest())
   582  		if err != nil {
   583  			return "", err
   584  		}
   585  		if _, err := verifier.Write(canonical); err != nil {
   586  			return "", err
   587  		}
   588  		if !verifier.Verified() {
   589  			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
   590  			logrus.Error(err)
   591  			return "", err
   592  		}
   593  		return digested.Digest(), nil
   594  	}
   595  
   596  	return digest.FromBytes(canonical), nil
   597  }
   598  
    599  // allowV1Fallback checks if the error is a possible reason to fall back to v1
   600  // (even if confirmedV2 has been set already), and if so, wraps the error in
   601  // a fallbackError with confirmedV2 set to false. Otherwise, it returns the
   602  // error unmodified.
   603  func allowV1Fallback(err error) error {
   604  	switch v := err.(type) {
   605  	case errcode.Errors:
   606  		if len(v) != 0 {
   607  			if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) {
   608  				return fallbackError{err: err, confirmedV2: false}
   609  			}
   610  		}
   611  	case errcode.Error:
   612  		if registry.ShouldV2Fallback(v) {
   613  			return fallbackError{err: err, confirmedV2: false}
   614  		}
   615  	}
   616  
   617  	return err
   618  }
   619  
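         // verifySchema1Manifest verifies the digest of a signed schema1 manifest
         // when pulling by digest, then performs basic structural checks (schema
         // version, matching history and layer counts) and returns the inner
         // manifest.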
   620  func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
    621  	// If pulling by digest, verify the manifest digest. NOTE: It is
   622  	// important to do this first, before any other content validation. If the
   623  	// digest cannot be verified, don't even bother with those other things.
   624  	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
   625  		verifier, err := digest.NewDigestVerifier(digested.Digest())
   626  		if err != nil {
   627  			return nil, err
   628  		}
   629  		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
   630  			return nil, err
   631  		}
   632  		if !verifier.Verified() {
   633  			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
   634  			logrus.Error(err)
   635  			return nil, err
   636  		}
   637  	}
   638  	m = &signedManifest.Manifest
   639  
   640  	if m.SchemaVersion != 1 {
   641  		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
   642  	}
   643  	if len(m.FSLayers) != len(m.History) {
   644  		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
   645  	}
   646  	if len(m.FSLayers) == 0 {
   647  		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
   648  	}
   649  	return m, nil
   650  }
   651  
   652  // fixManifestLayers removes repeated layers from the manifest and checks the
   653  // correctness of the parent chain.
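         // For example, consecutive duplicate IDs such as [C, B, B, A] (top-most
         // first) collapse to [C, B, A], while an ID that reappears in a
         // non-consecutive position, as in [C, A, B, A], is rejected.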
   654  func fixManifestLayers(m *schema1.Manifest) error {
   655  	imgs := make([]*image.V1Image, len(m.FSLayers))
   656  	for i := range m.FSLayers {
   657  		img := &image.V1Image{}
   658  
   659  		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
   660  			return err
   661  		}
   662  
   663  		imgs[i] = img
   664  		if err := v1.ValidateID(img.ID); err != nil {
   665  			return err
   666  		}
   667  	}
   668  
   669  	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
    670  		// On Windows, the base layer may have a parent that is not present in the manifest.
    671  		return errors.New("invalid parent ID in the base layer of the image")
   672  	}
   673  
    674  	// Check for duplicate IDs anywhere in the manifest so that we fail with an error instead of deadlocking later.
   675  	idmap := make(map[string]struct{})
   676  
   677  	var lastID string
   678  	for _, img := range imgs {
    679  		// Skip IDs that repeat consecutively; those are collapsed in the loop below.
   680  		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
   681  			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
   682  		}
   683  		lastID = img.ID
   684  		idmap[lastID] = struct{}{}
   685  	}
   686  
    687  	// Iterate backwards so that the remaining indexes stay valid as items are removed.
   688  	for i := len(imgs) - 2; i >= 0; i-- {
   689  		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
   690  			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
   691  			m.History = append(m.History[:i], m.History[i+1:]...)
   692  		} else if imgs[i].Parent != imgs[i+1].ID {
    693  			return fmt.Errorf("invalid parent ID: expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
   694  		}
   695  	}
   696  
   697  	return nil
   698  }