github.com/vincentwoo/docker@v0.7.3-0.20160116130405-82401a4b13c0/distribution/pull_v2.go

package distribution

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"runtime"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"golang.org/x/net/context"
)

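// errRootFSMismatch is returned when the DiffIDs of the layers downloaded
// according to the manifest do not match the rootfs section of the image
// configuration.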
var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")

type v2Puller struct {
	V2MetadataService *metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

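// Pull pulls the repository referenced by ref from the endpoint's v2
// registry. When the error permits it, the returned fallbackError tells the
// caller that a retry against the v1 protocol is allowed.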
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return fallbackError{err: err, confirmedV2: p.confirmedV2}
	}

	if err = p.pullV2Repository(ctx, ref); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if registry.ContinueOnError(err) {
			logrus.Debugf("Error trying v2 registry: %v", err)
			return fallbackError{err: err, confirmedV2: p.confirmedV2}
		}
	}
	return err
}

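// pullV2Repository pulls a single tag if ref carries one, or every tag in
// the repository if ref is a bare name, and finishes by writing a status
// line that reports whether anything new was downloaded or tagged.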
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)

	return nil
}

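// v2LayerDescriptor describes a single layer blob to be downloaded from a
// v2 registry. It implements xfer.DownloadDescriptor, so instances are
// handed to the download manager, as in pullSchema1 and pullSchema2:
//
//	layerDescriptor := &v2LayerDescriptor{
//		digest:            blobSum,
//		repoInfo:          p.repoInfo,
//		repo:              p.repo,
//		V2MetadataService: p.V2MetadataService,
//	}
//	descriptors = append(descriptors, layerDescriptor)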
type v2LayerDescriptor struct {
	digest            digest.Digest
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService *metadata.V2MetadataService
}

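// Key returns the stable cache key for this blob within the v2 namespace.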
func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

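// ID returns a short identifier used to label progress output for this layer.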
func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

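// DiffID returns the DiffID previously associated with this blob digest, if
// the metadata service has seen the blob before.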
func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.V2MetadataService.GetDiffID(ld.digest)
}

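// Download streams the blob to a temporary file while verifying its digest
// and reporting progress, then returns a ReadCloser over the verified
// contents along with the blob size (0 when the size is unknown).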
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	blobs := ld.repo.Blobs(ctx)

	layerDownload, err := blobs.Open(ctx, ld.digest)
	if err != nil {
		logrus.Debugf("Error opening layer: %v", err)
		if err == distribution.ErrBlobUnknown {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
		return nil, 0, retryOnError(err)
	}

	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		// Restore the seek offset to the beginning of the stream.
		_, err = layerDownload.Seek(0, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size, ld.ID(), "Downloading")
	defer reader.Close()

	verifier, err := digest.NewDigestVerifier(ld.digest)
	if err != nil {
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, verifier))
	if err != nil {
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)
		tmpFile.Close()
		if err := os.RemoveAll(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}

		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	// Rewind so the caller reads the verified contents from the beginning.
	tmpFile.Seek(0, 0)
	return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil
}

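// Registered is called once the layer has been registered in the layer
// store, so the DiffID-to-blobsum mapping can be cached for later pulls and
// pushes.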
func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
}

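// pullV2Tag fetches the manifest for a single tag or digest, dispatches to
// the handler for the manifest's schema, and records the resulting image ID
// in the reference store. It returns true if anything changed locally.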
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		// NOTE: not using TagService.Get, since it uses HEAD requests
		// against the manifests endpoint, which are not supported by
		// all registry versions.
		manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())

	var (
		imageID        image.ID
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
	if err == nil {
		if oldTagImageID == imageID {
			return false, nil
		}
	} else if err != reference.ErrDoesNotExist {
		return false, err
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
			return false, err
		}
	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
		return false, err
	}

	return true, nil
}

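// pullSchema1 verifies a schema1 signed manifest, downloads its layers from
// bottom to top, synthesizes an image configuration from the v1
// compatibility history, and registers the result in the image store.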
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return "", "", err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err = p.config.ImageStore.Create(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}

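// pullSchema2 pulls the image config and the layers of a schema2 manifest
// concurrently, then cross-checks the downloaded DiffIDs against the rootfs
// section of the config before creating the image.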
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	imageID = image.ID(target.Digest)
	if _, err := p.config.ImageStore.Get(imageID); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return imageID, manifestDigest, nil
	}

	configChan := make(chan []byte, 1)
	errChan := make(chan error, 1)
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
		if err != nil {
			errChan <- err
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.References() {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	var (
		configJSON         []byte       // raw serialized image config
		unmarshalledConfig image.Image  // deserialized image config
		downloadRootFS     image.RootFS // rootFS to use for registering layers.
	)
	if runtime.GOOS == "windows" {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
		if unmarshalledConfig.RootFS == nil {
			return "", "", errors.New("image config has no rootfs section")
		}
		downloadRootFS = *unmarshalledConfig.RootFS
		downloadRootFS.DiffIDs = []layer.DiffID{}
	} else {
		downloadRootFS = *image.NewRootFS()
	}

	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		if configJSON != nil {
			// Already received the config
			return "", "", err
		}
		select {
		case err = <-errChan:
			return "", "", err
		default:
			cancel()
			select {
			case <-configChan:
			case <-errChan:
			}
			return "", "", err
		}
	}
	defer release()

	if configJSON == nil {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
	}

	// The DiffIDs returned in rootFS MUST match those in the config.
	// Otherwise the image config could be referencing layers that aren't
	// included in the manifest.
	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
		return "", "", errRootFSMismatch
	}

	for i := range rootFS.DiffIDs {
		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
			return "", "", errRootFSMismatch
		}
	}

	imageID, err = p.config.ImageStore.Create(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}

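// receiveConfig waits for either the raw image config bytes or an error
// from the config-pulling goroutine, and unmarshals the config on success.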
func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
	select {
	case configJSON := <-configChan:
		var unmarshalledConfig image.Image
		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
			return nil, image.Image{}, err
		}
		return configJSON, unmarshalledConfig, nil
	case err := <-errChan:
		return nil, image.Image{}, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2ImageConfig.
	}
}

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var manifestDigest digest.Digest
	for _, manifestDescriptor := range mfstList.Manifests {
		// TODO(aaronl): The manifest list spec supports optional
		// "features" and "variant" fields. These are not yet used.
		// Once they are, their values should be interpreted here.
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
			manifestDigest = manifestDescriptor.Digest
			break
		}
	}

	if manifestDigest == "" {
		return "", "", errors.New("no supported platform found in manifest list")
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(ref, manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return imageID, manifestListDigest, err
}

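// pullSchema2ImageConfig fetches the image configuration blob by digest and
// verifies that its content matches that digest before returning it.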
func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return nil, err
	}
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return "", err
		}
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fall back to
// v1 (even if confirmedV2 has been set already), and if so, wraps the error
// in a fallbackError with confirmedV2 set to false. Otherwise, it returns
// the error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) {
				return fallbackError{err: err, confirmedV2: false}
			}
		}
	case errcode.Error:
		if registry.ShouldV2Fallback(v) {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

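// verifySchema1Manifest checks a signed schema1 manifest against the
// requested reference: it verifies the digest for pull-by-digest requests
// and performs basic structural validation of the layer and history lists.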
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return nil, err
		}
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
	}
	return m, nil
}

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// A Windows base layer can point to a parent that is not in the manifest.
		return errors.New("Invalid parent ID in the base layer of the image.")
	}

	// Check for duplicate IDs anywhere in the chain, so we return an error
	// instead of deadlocking later.
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// Skip IDs that appear consecutively; those are deduplicated below.
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// Iterate backwards so that removing items doesn't shift the indexes
	// that remain to be visited.
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}