github.com/skf/moby@v1.13.1/distribution/pull_v2.go

package distribution

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"runtime"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"golang.org/x/net/context"
)

var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

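// v2Puller pulls images from a v2 registry. It is set up per repository
// pull attempt and carries the resolved repository handle along with the
// pull configuration.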
type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

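// Pull resolves the repository for ref on the configured v2 endpoint and
// pulls it. Qualifying errors are wrapped in a fallbackError so the caller
// may retry the pull over the v1 protocol.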
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pullV2Repository(ctx, ref); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err) {
			logrus.Errorf("Error trying v2 registry: %v", err)
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

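// pullV2Repository pulls a single tag or digest if ref names one, or every
// tag in the repository if ref is name-only, then writes the final pull
// status message.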
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)

	return nil
}

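// v2LayerDescriptor describes a single layer blob to be downloaded from a
// v2 registry; it is handed to the download manager as an
// xfer.DownloadDescriptor.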
type v2LayerDescriptor struct {
	digest            digest.Digest
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

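// Key returns a key that uniquely identifies this blob download; the "v2:"
// prefix namespaces the digest to the v2 code path.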
func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

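// ID returns the truncated digest used to identify this layer in progress
// output.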
func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

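// DiffID returns the DiffID previously recorded for this blob, if any, from
// the V2 metadata service.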
func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.V2MetadataService.GetDiffID(ld.digest)
}

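// Download downloads the layer blob to a temporary file, resuming from an
// earlier partial download when possible and feeding the bytes through a
// digest verifier. It returns a ReadCloser over the verified temp file
// together with the blob size (0 when the size is unknown).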
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier, err = digest.NewDigestVerifier(ld.digest)
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}

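// Close releases the temporary download file, if one is still held.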
func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

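// truncateDownloadFile empties the temporary download file and discards the
// digest verifier so the download can be restarted from the beginning.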
func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

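// Registered is called once the downloaded layer has been registered with
// the layer store.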
func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
}

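// pullV2Tag fetches the manifest named by ref, dispatches to the
// schema-specific pull path, and updates the reference store. It returns
// true when layers were downloaded or a tag was created or updated.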
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, fmt.Errorf("target is %s", configClass)
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != reference.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}

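// pullSchema1 verifies a schema1 manifest, downloads its layers from
// bottom-most to top-most, and synthesizes an image configuration from the
// embedded v1 compatibility history.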
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}

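// pullSchema2 pulls the layers listed in a schema2 manifest while fetching
// the image configuration in parallel, then checks that the downloaded
// DiffIDs match the rootfs recorded in the configuration.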
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, manifestDigest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.Layers {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	errChan := make(chan error, 1)
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			errChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte        // raw serialized image config
		downloadedRootFS *image.RootFS // rootFS from registered layers
		configRootFS     *image.RootFS // rootFS from configuration
	)

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively
	// serialising the config fetch and the layer download, which slightly
	// slows the pull. We have to do it this way, as chances are the download
	// of layers itself would fail due to file names which aren't suitable for
	// NTFS. At some point in the future, if a similar check to block Windows
	// images being pulled on Linux is implemented, it may be necessary to
	// perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan)
		if err != nil {
			return "", "", err
		}

		if configRootFS == nil {
			return "", "", errRootFSInvalid
		}
	}

	if p.config.DownloadManager != nil {
		downloadRootFS := *image.NewRootFS()
		rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
		if err != nil {
			if configJSON != nil {
				// Already received the config
				return "", "", err
			}
			select {
			case err = <-errChan:
				return "", "", err
			default:
				cancel()
				select {
				case <-configChan:
				case <-errChan:
				}
				return "", "", err
			}
		}
		if release != nil {
			defer release()
		}

		downloadedRootFS = &rootFS
	}

	if configJSON == nil {
		configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan)
		if err != nil {
			return "", "", err
		}

		if configRootFS == nil {
			return "", "", errRootFSInvalid
		}
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}

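// receiveConfig blocks until the goroutine started by pullSchema2 delivers
// either the raw image configuration or an error.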
func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) {
	select {
	case configJSON := <-configChan:
		rootfs, err := s.RootFSFromConfig(configJSON)
		if err != nil {
			return nil, nil, err
		}
		return configJSON, rootfs, nil
	case err := <-errChan:
		return nil, nil, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2Config.
	}
}

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var manifestDigest digest.Digest
	for _, manifestDescriptor := range mfstList.Manifests {
		// TODO(aaronl): The manifest list spec supports optional
		// "features" and "variant" fields. These are not yet used.
		// Once they are, their values should be interpreted here.
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
			manifestDigest = manifestDescriptor.Digest
			break
		}
	}

	if manifestDigest == "" {
		return "", "", errors.New("no supported platform found in manifest list")
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		id, _, err = p.pullSchema1(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		id, _, err = p.pullSchema2(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}

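// pullSchema2Config fetches the image configuration blob and verifies it
// against the expected digest before returning it.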
func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return nil, err
	}
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return "", err
		}
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fall back to
// v1 (even if confirmedV2 has been set already), and if so, wraps the error
// in a fallbackError with confirmedV2 set to false. Otherwise, it returns
// the error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

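// verifySchema1Manifest verifies the manifest digest when pulling by digest
// and performs basic structural validation of the schema1 manifest.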
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return nil, err
		}
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
	}
	return m, nil
}

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// Check for general duplicates up front so we return an error instead of
	// deadlocking later.
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// Skip IDs that repeat consecutively; those are deduplicated by the
		// loop below.
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// Iterate backwards so the remaining indexes stay valid as items are
	// removed.
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID: remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID: expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}

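// createDownloadFile creates the temporary file that a layer blob is
// downloaded into.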
func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}