github.com/endophage/docker@v1.4.2-0.20161027011718-242853499895/distribution/pull_v2.go

     1  package distribution
     2  
     3  import (
     4  	"encoding/json"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"net/url"
    10  	"os"
    11  	"runtime"
    12  
    13  	"github.com/Sirupsen/logrus"
    14  	"github.com/docker/distribution"
    15  	"github.com/docker/distribution/digest"
    16  	"github.com/docker/distribution/manifest/manifestlist"
    17  	"github.com/docker/distribution/manifest/schema1"
    18  	"github.com/docker/distribution/manifest/schema2"
    19  	"github.com/docker/distribution/registry/api/errcode"
    20  	"github.com/docker/distribution/registry/client/auth"
    21  	"github.com/docker/distribution/registry/client/transport"
    22  	"github.com/docker/docker/distribution/metadata"
    23  	"github.com/docker/docker/distribution/xfer"
    24  	"github.com/docker/docker/image"
    25  	"github.com/docker/docker/image/v1"
    26  	"github.com/docker/docker/layer"
    27  	"github.com/docker/docker/pkg/ioutils"
    28  	"github.com/docker/docker/pkg/progress"
    29  	"github.com/docker/docker/pkg/stringid"
    30  	"github.com/docker/docker/reference"
    31  	"github.com/docker/docker/registry"
    32  	"golang.org/x/net/context"
    33  )
    34  
    35  var (
    36  	errRootFSMismatch  = errors.New("layers from manifest don't match image configuration")
    37  	errMediaTypePlugin = errors.New("target is a plugin")
    38  	errRootFSInvalid   = errors.New("invalid rootfs in image configuration")
    39  )
    40  
    41  // ImageConfigPullError is an error pulling the image config blob
    42  // (only applies to schema2).
    43  type ImageConfigPullError struct {
    44  	Err error
    45  }
    46  
    47  // Error returns the error string for ImageConfigPullError.
    48  func (e ImageConfigPullError) Error() string {
    49  	return "error pulling image configuration: " + e.Err.Error()
    50  }
    51  
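         // v2Puller pulls images from a registry endpoint that speaks the v2 distribution protocol.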
    52  type v2Puller struct {
    53  	V2MetadataService metadata.V2MetadataService
    54  	endpoint          registry.APIEndpoint
    55  	config            *ImagePullConfig
    56  	repoInfo          *registry.RepositoryInfo
    57  	repo              distribution.Repository
    58  	// confirmedV2 is set to true if we confirm we're talking to a v2
    59  	// registry. This is used to limit fallbacks to the v1 protocol.
    60  	confirmedV2 bool
    61  }
    62  
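         // Pull pulls the image referenced by ref from the repository's v2 endpoint. Errors that
         // should trigger a fallback to the v1 protocol are wrapped in a fallbackError.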
    63  func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
    64  	// TODO(tiborvass): was ReceiveTimeout
    65  	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
    66  	if err != nil {
    67  		logrus.Warnf("Error getting v2 registry: %v", err)
    68  		return err
    69  	}
    70  
    71  	if err = p.pullV2Repository(ctx, ref); err != nil {
    72  		if _, ok := err.(fallbackError); ok {
    73  			return err
    74  		}
    75  		if continueOnError(err) {
    76  			logrus.Errorf("Error trying v2 registry: %v", err)
    77  			return fallbackError{
    78  				err:         err,
    79  				confirmedV2: p.confirmedV2,
    80  				transportOK: true,
    81  			}
    82  		}
    83  	}
    84  	return err
    85  }
    86  
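         // pullV2Repository pulls the tag or digest named by ref, or every tag in the repository
         // when ref is a name-only reference, and writes a final status message.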
    87  func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
    88  	var layersDownloaded bool
    89  	if !reference.IsNameOnly(ref) {
    90  		layersDownloaded, err = p.pullV2Tag(ctx, ref)
    91  		if err != nil {
    92  			return err
    93  		}
    94  	} else {
    95  		tags, err := p.repo.Tags(ctx).All(ctx)
    96  		if err != nil {
    97  			// If this repository doesn't exist on V2, we should
    98  			// permit a fallback to V1.
    99  			return allowV1Fallback(err)
   100  		}
   101  
   102  		// The v2 registry knows about this repository, so we will not
   103  		// allow fallback to the v1 protocol even if we encounter an
   104  		// error later on.
   105  		p.confirmedV2 = true
   106  
   107  		for _, tag := range tags {
   108  			tagRef, err := reference.WithTag(ref, tag)
   109  			if err != nil {
   110  				return err
   111  			}
   112  			pulledNew, err := p.pullV2Tag(ctx, tagRef)
   113  			if err != nil {
   114  				// Since this is the pull-all-tags case, don't
   115  				// allow an error pulling a particular tag to
   116  				// make the whole pull fall back to v1.
   117  				if fallbackErr, ok := err.(fallbackError); ok {
   118  					return fallbackErr.err
   119  				}
   120  				return err
   121  			}
    122  			// pulledNew is true if new layers were downloaded OR if existing images were newly tagged
    123  			// TODO(tiborvass): should we change the name of `layersDownloaded`? What about the message in writeStatus?
   124  			layersDownloaded = layersDownloaded || pulledNew
   125  		}
   126  	}
   127  
   128  	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)
   129  
   130  	return nil
   131  }
   132  
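         // v2LayerDescriptor describes a single layer blob to be downloaded from a v2 registry.
         // It is handed to the download manager as an xfer.DownloadDescriptor.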
   133  type v2LayerDescriptor struct {
   134  	digest            digest.Digest
   135  	repoInfo          *registry.RepositoryInfo
   136  	repo              distribution.Repository
   137  	V2MetadataService metadata.V2MetadataService
   138  	tmpFile           *os.File
   139  	verifier          digest.Verifier
   140  	src               distribution.Descriptor
   141  }
   142  
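         // Key returns a unique string identifying this blob; the download manager uses it to
         // avoid downloading the same blob more than once.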
   143  func (ld *v2LayerDescriptor) Key() string {
   144  	return "v2:" + ld.digest.String()
   145  }
   146  
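         // ID returns a short identifier for the blob (the truncated digest), used in progress output.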
   147  func (ld *v2LayerDescriptor) ID() string {
   148  	return stringid.TruncateID(ld.digest.String())
   149  }
   150  
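         // DiffID returns the DiffID previously recorded for this blob in the V2 metadata service, if any.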
   151  func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
   152  	return ld.V2MetadataService.GetDiffID(ld.digest)
   153  }
   154  
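         // Download fetches the layer blob into a temporary file, resuming a previous partial
         // download when possible and verifying the content against the expected digest. It
         // returns a ReadCloser over the downloaded data along with the blob size.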
   155  func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
   156  	logrus.Debugf("pulling blob %q", ld.digest)
   157  
   158  	var (
   159  		err    error
   160  		offset int64
   161  	)
   162  
   163  	if ld.tmpFile == nil {
   164  		ld.tmpFile, err = createDownloadFile()
   165  		if err != nil {
   166  			return nil, 0, xfer.DoNotRetry{Err: err}
   167  		}
   168  	} else {
   169  		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
   170  		if err != nil {
   171  			logrus.Debugf("error seeking to end of download file: %v", err)
   172  			offset = 0
   173  
   174  			ld.tmpFile.Close()
   175  			if err := os.Remove(ld.tmpFile.Name()); err != nil {
   176  				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
   177  			}
   178  			ld.tmpFile, err = createDownloadFile()
   179  			if err != nil {
   180  				return nil, 0, xfer.DoNotRetry{Err: err}
   181  			}
   182  		} else if offset != 0 {
   183  			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
   184  		}
   185  	}
   186  
   187  	tmpFile := ld.tmpFile
   188  
   189  	layerDownload, err := ld.open(ctx)
   190  	if err != nil {
   191  		logrus.Errorf("Error initiating layer download: %v", err)
   192  		if err == distribution.ErrBlobUnknown {
   193  			return nil, 0, xfer.DoNotRetry{Err: err}
   194  		}
   195  		return nil, 0, retryOnError(err)
   196  	}
   197  
   198  	if offset != 0 {
   199  		_, err := layerDownload.Seek(offset, os.SEEK_SET)
   200  		if err != nil {
   201  			if err := ld.truncateDownloadFile(); err != nil {
   202  				return nil, 0, xfer.DoNotRetry{Err: err}
   203  			}
   204  			return nil, 0, err
   205  		}
   206  	}
   207  	size, err := layerDownload.Seek(0, os.SEEK_END)
   208  	if err != nil {
   209  		// Seek failed, perhaps because there was no Content-Length
   210  		// header. This shouldn't fail the download, because we can
   211  		// still continue without a progress bar.
   212  		size = 0
   213  	} else {
   214  		if size != 0 && offset > size {
   215  			logrus.Debug("Partial download is larger than full blob. Starting over")
   216  			offset = 0
   217  			if err := ld.truncateDownloadFile(); err != nil {
   218  				return nil, 0, xfer.DoNotRetry{Err: err}
   219  			}
   220  		}
   221  
   222  		// Restore the seek offset either at the beginning of the
   223  		// stream, or just after the last byte we have from previous
   224  		// attempts.
   225  		_, err = layerDownload.Seek(offset, os.SEEK_SET)
   226  		if err != nil {
   227  			return nil, 0, err
   228  		}
   229  	}
   230  
   231  	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
   232  	defer reader.Close()
   233  
   234  	if ld.verifier == nil {
   235  		ld.verifier, err = digest.NewDigestVerifier(ld.digest)
   236  		if err != nil {
   237  			return nil, 0, xfer.DoNotRetry{Err: err}
   238  		}
   239  	}
   240  
   241  	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
   242  	if err != nil {
   243  		if err == transport.ErrWrongCodeForByteRange {
   244  			if err := ld.truncateDownloadFile(); err != nil {
   245  				return nil, 0, xfer.DoNotRetry{Err: err}
   246  			}
   247  			return nil, 0, err
   248  		}
   249  		return nil, 0, retryOnError(err)
   250  	}
   251  
   252  	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")
   253  
   254  	if !ld.verifier.Verified() {
   255  		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
   256  		logrus.Error(err)
   257  
   258  		// Allow a retry if this digest verification error happened
   259  		// after a resumed download.
   260  		if offset != 0 {
   261  			if err := ld.truncateDownloadFile(); err != nil {
   262  				return nil, 0, xfer.DoNotRetry{Err: err}
   263  			}
   264  
   265  			return nil, 0, err
   266  		}
   267  		return nil, 0, xfer.DoNotRetry{Err: err}
   268  	}
   269  
   270  	progress.Update(progressOutput, ld.ID(), "Download complete")
   271  
   272  	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())
   273  
   274  	_, err = tmpFile.Seek(0, os.SEEK_SET)
   275  	if err != nil {
   276  		tmpFile.Close()
   277  		if err := os.Remove(tmpFile.Name()); err != nil {
   278  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   279  		}
   280  		ld.tmpFile = nil
   281  		ld.verifier = nil
   282  		return nil, 0, xfer.DoNotRetry{Err: err}
   283  	}
   284  
   285  	// hand off the temporary file to the download manager, so it will only
   286  	// be closed once
   287  	ld.tmpFile = nil
   288  
   289  	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
   290  		tmpFile.Close()
   291  		err := os.RemoveAll(tmpFile.Name())
   292  		if err != nil {
   293  			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
   294  		}
   295  		return err
   296  	}), size, nil
   297  }
   298  
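         // Close closes and removes the temporary download file, if the descriptor still holds one.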
   299  func (ld *v2LayerDescriptor) Close() {
   300  	if ld.tmpFile != nil {
   301  		ld.tmpFile.Close()
   302  		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
   303  			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
   304  		}
   305  	}
   306  }
   307  
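         // truncateDownloadFile discards any partially downloaded data and resets the digest
         // verifier so the download can be restarted from the beginning.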
   308  func (ld *v2LayerDescriptor) truncateDownloadFile() error {
   309  	// Need a new hash context since we will be redoing the download
   310  	ld.verifier = nil
   311  
   312  	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
   313  		logrus.Errorf("error seeking to beginning of download file: %v", err)
   314  		return err
   315  	}
   316  
   317  	if err := ld.tmpFile.Truncate(0); err != nil {
   318  		logrus.Errorf("error truncating download file: %v", err)
   319  		return err
   320  	}
   321  
   322  	return nil
   323  }
   324  
   325  func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
   326  	// Cache mapping from this layer's DiffID to the blobsum
   327  	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
   328  }
   329  
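         // pullV2Tag fetches the manifest for ref, pulls the image it describes, and records the
         // tag or digest in the reference store. It returns true when the local reference was
         // newly added or updated.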
   330  func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
   331  	manSvc, err := p.repo.Manifests(ctx)
   332  	if err != nil {
   333  		return false, err
   334  	}
   335  
   336  	var (
   337  		manifest    distribution.Manifest
   338  		tagOrDigest string // Used for logging/progress only
   339  	)
   340  	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
   341  		// NOTE: not using TagService.Get, since it uses HEAD requests
   342  		// against the manifests endpoint, which are not supported by
   343  		// all registry versions.
   344  		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
   345  		if err != nil {
   346  			return false, allowV1Fallback(err)
   347  		}
   348  		tagOrDigest = tagged.Tag()
   349  	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
   350  		manifest, err = manSvc.Get(ctx, digested.Digest())
   351  		if err != nil {
   352  			return false, err
   353  		}
   354  		tagOrDigest = digested.Digest().String()
   355  	} else {
   356  		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
   357  	}
   358  
   359  	if manifest == nil {
   360  		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
   361  	}
   362  
   363  	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
   364  		if m.Manifest.Config.MediaType == schema2.MediaTypePluginConfig {
   365  			return false, errMediaTypePlugin
   366  		}
   367  	}
   368  
   369  	// If manSvc.Get succeeded, we can be confident that the registry on
   370  	// the other side speaks the v2 protocol.
   371  	p.confirmedV2 = true
   372  
   373  	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
   374  	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())
   375  
   376  	var (
   377  		id             digest.Digest
   378  		manifestDigest digest.Digest
   379  	)
   380  
   381  	switch v := manifest.(type) {
   382  	case *schema1.SignedManifest:
   383  		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
   384  		if err != nil {
   385  			return false, err
   386  		}
   387  	case *schema2.DeserializedManifest:
   388  		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
   389  		if err != nil {
   390  			return false, err
   391  		}
   392  	case *manifestlist.DeserializedManifestList:
   393  		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
   394  		if err != nil {
   395  			return false, err
   396  		}
   397  	default:
   398  		return false, errors.New("unsupported manifest format")
   399  	}
   400  
   401  	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
   402  
   403  	oldTagID, err := p.config.ReferenceStore.Get(ref)
   404  	if err == nil {
   405  		if oldTagID == id {
   406  			return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
   407  		}
   408  	} else if err != reference.ErrDoesNotExist {
   409  		return false, err
   410  	}
   411  
   412  	if canonical, ok := ref.(reference.Canonical); ok {
   413  		if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
   414  			return false, err
   415  		}
   416  	} else {
   417  		if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
   418  			return false, err
   419  		}
   420  		if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
   421  			return false, err
   422  		}
   423  	}
   424  	return true, nil
   425  }
   426  
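         // pullSchema1 pulls an image described by a schema1 signed manifest: it verifies the
         // manifest, downloads the referenced layers (skipping throwaway layers), and creates the
         // image from the converted v1 configuration. It returns the image ID and manifest digest.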
   427  func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
   428  	var verifiedManifest *schema1.Manifest
   429  	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
   430  	if err != nil {
   431  		return "", "", err
   432  	}
   433  
   434  	rootFS := image.NewRootFS()
   435  
   436  	// remove duplicate layers and check parent chain validity
   437  	err = fixManifestLayers(verifiedManifest)
   438  	if err != nil {
   439  		return "", "", err
   440  	}
   441  
   442  	var descriptors []xfer.DownloadDescriptor
   443  
   444  	// Image history converted to the new format
   445  	var history []image.History
   446  
    447  	// Note that this loop iterates from the bottom-most layer to the
    448  	// top-most, so that the downloads slice is ordered correctly.
   449  	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
   450  		blobSum := verifiedManifest.FSLayers[i].BlobSum
   451  
   452  		var throwAway struct {
   453  			ThrowAway bool `json:"throwaway,omitempty"`
   454  		}
   455  		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
   456  			return "", "", err
   457  		}
   458  
   459  		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
   460  		if err != nil {
   461  			return "", "", err
   462  		}
   463  		history = append(history, h)
   464  
   465  		if throwAway.ThrowAway {
   466  			continue
   467  		}
   468  
   469  		layerDescriptor := &v2LayerDescriptor{
   470  			digest:            blobSum,
   471  			repoInfo:          p.repoInfo,
   472  			repo:              p.repo,
   473  			V2MetadataService: p.V2MetadataService,
   474  		}
   475  
   476  		descriptors = append(descriptors, layerDescriptor)
   477  	}
   478  
   479  	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
   480  	if err != nil {
   481  		return "", "", err
   482  	}
   483  	defer release()
   484  
   485  	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
   486  	if err != nil {
   487  		return "", "", err
   488  	}
   489  
   490  	imageID, err := p.config.ImageStore.Create(config)
   491  	if err != nil {
   492  		return "", "", err
   493  	}
   494  
   495  	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
   496  
   497  	return imageID.Digest(), manifestDigest, nil
   498  }
   499  
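         // pullSchema2 pulls an image described by a schema2 manifest. The image configuration
         // blob is fetched concurrently with the layer downloads, and the DiffIDs in the
         // configuration's rootfs must match the downloaded layers. It returns the image ID and
         // the manifest digest.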
   500  func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
   501  	manifestDigest, err = schema2ManifestDigest(ref, mfst)
   502  	if err != nil {
   503  		return "", "", err
   504  	}
   505  
   506  	target := mfst.Target()
   507  	if _, err := p.config.ImageStore.Get(image.IDFromDigest(target.Digest)); err == nil {
   508  		// If the image already exists locally, no need to pull
   509  		// anything.
   510  		return target.Digest, manifestDigest, nil
   511  	}
   512  
   513  	var descriptors []xfer.DownloadDescriptor
   514  
    515  	// Note that this loop iterates from the bottom-most layer to the
    516  	// top-most, so that the downloads slice is ordered correctly.
   517  	for _, d := range mfst.Layers {
   518  		layerDescriptor := &v2LayerDescriptor{
   519  			digest:            d.Digest,
   520  			repo:              p.repo,
   521  			repoInfo:          p.repoInfo,
   522  			V2MetadataService: p.V2MetadataService,
   523  			src:               d,
   524  		}
   525  
   526  		descriptors = append(descriptors, layerDescriptor)
   527  	}
   528  
   529  	configChan := make(chan []byte, 1)
   530  	errChan := make(chan error, 1)
   531  	var cancel func()
   532  	ctx, cancel = context.WithCancel(ctx)
   533  
   534  	// Pull the image config
   535  	go func() {
   536  		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
   537  		if err != nil {
   538  			errChan <- ImageConfigPullError{Err: err}
   539  			cancel()
   540  			return
   541  		}
   542  		configChan <- configJSON
   543  	}()
   544  
   545  	var (
   546  		configJSON         []byte       // raw serialized image config
   547  		unmarshalledConfig image.Image  // deserialized image config
   548  		downloadRootFS     image.RootFS // rootFS to use for registering layers.
   549  	)
   550  
    551  	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
    552  	// explicitly blocking images intended for Linux from the Windows daemon. On
    553  	// Windows, we do this before the attempt to download, effectively serialising
    554  	// the download and slightly slowing it down. We have to do it this way, as
    555  	// chances are the download of layers itself would fail due to file names
    556  	// that aren't suitable for NTFS. At some point in the future, if a similar
    557  	// check to block Windows images being pulled on Linux is implemented, it
    558  	// may be necessary to perform the same type of serialisation.
   559  	if runtime.GOOS == "windows" {
   560  		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
   561  		if err != nil {
   562  			return "", "", err
   563  		}
   564  
   565  		if unmarshalledConfig.RootFS == nil {
   566  			return "", "", errRootFSInvalid
   567  		}
   568  
   569  		if unmarshalledConfig.OS == "linux" {
   570  			return "", "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
   571  		}
   572  	}
   573  
   574  	downloadRootFS = *image.NewRootFS()
   575  
   576  	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
   577  	if err != nil {
   578  		if configJSON != nil {
   579  			// Already received the config
   580  			return "", "", err
   581  		}
   582  		select {
   583  		case err = <-errChan:
   584  			return "", "", err
   585  		default:
   586  			cancel()
   587  			select {
   588  			case <-configChan:
   589  			case <-errChan:
   590  			}
   591  			return "", "", err
   592  		}
   593  	}
   594  	defer release()
   595  
   596  	if configJSON == nil {
   597  		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
   598  		if err != nil {
   599  			return "", "", err
   600  		}
   601  
   602  		if unmarshalledConfig.RootFS == nil {
   603  			return "", "", errRootFSInvalid
   604  		}
   605  	}
   606  
   607  	// The DiffIDs returned in rootFS MUST match those in the config.
   608  	// Otherwise the image config could be referencing layers that aren't
   609  	// included in the manifest.
   610  	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
   611  		return "", "", errRootFSMismatch
   612  	}
   613  
   614  	for i := range rootFS.DiffIDs {
   615  		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
   616  			return "", "", errRootFSMismatch
   617  		}
   618  	}
   619  
   620  	imageID, err := p.config.ImageStore.Create(configJSON)
   621  	if err != nil {
   622  		return "", "", err
   623  	}
   624  
   625  	return imageID.Digest(), manifestDigest, nil
   626  }
   627  
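         // receiveConfig waits for either the raw image configuration or an error from the
         // config-pulling goroutine, and returns the raw bytes along with the unmarshalled
         // image.Image.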
   628  func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
   629  	select {
   630  	case configJSON := <-configChan:
   631  		var unmarshalledConfig image.Image
   632  		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
   633  			return nil, image.Image{}, err
   634  		}
   635  		return configJSON, unmarshalledConfig, nil
   636  	case err := <-errChan:
   637  		return nil, image.Image{}, err
   638  		// Don't need a case for ctx.Done in the select because cancellation
    639  		// will trigger an error in p.pullSchema2Config.
   640  	}
   641  }
   642  
   643  // pullManifestList handles "manifest lists" which point to various
    644  // platform-specific manifests.
   645  func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
   646  	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
   647  	if err != nil {
   648  		return "", "", err
   649  	}
   650  
   651  	var manifestDigest digest.Digest
   652  	for _, manifestDescriptor := range mfstList.Manifests {
   653  		// TODO(aaronl): The manifest list spec supports optional
   654  		// "features" and "variant" fields. These are not yet used.
   655  		// Once they are, their values should be interpreted here.
   656  		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
   657  			manifestDigest = manifestDescriptor.Digest
   658  			break
   659  		}
   660  	}
   661  
   662  	if manifestDigest == "" {
   663  		return "", "", errors.New("no supported platform found in manifest list")
   664  	}
   665  
   666  	manSvc, err := p.repo.Manifests(ctx)
   667  	if err != nil {
   668  		return "", "", err
   669  	}
   670  
   671  	manifest, err := manSvc.Get(ctx, manifestDigest)
   672  	if err != nil {
   673  		return "", "", err
   674  	}
   675  
   676  	manifestRef, err := reference.WithDigest(ref, manifestDigest)
   677  	if err != nil {
   678  		return "", "", err
   679  	}
   680  
   681  	switch v := manifest.(type) {
   682  	case *schema1.SignedManifest:
   683  		id, _, err = p.pullSchema1(ctx, manifestRef, v)
   684  		if err != nil {
   685  			return "", "", err
   686  		}
   687  	case *schema2.DeserializedManifest:
   688  		id, _, err = p.pullSchema2(ctx, manifestRef, v)
   689  		if err != nil {
   690  			return "", "", err
   691  		}
   692  	default:
   693  		return "", "", errors.New("unsupported manifest format")
   694  	}
   695  
   696  	return id, manifestListDigest, err
   697  }
   698  
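         // pullSchema2Config downloads the image configuration blob identified by dgst and
         // verifies its content against that digest.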
   699  func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
   700  	blobs := p.repo.Blobs(ctx)
   701  	configJSON, err = blobs.Get(ctx, dgst)
   702  	if err != nil {
   703  		return nil, err
   704  	}
   705  
   706  	// Verify image config digest
   707  	verifier, err := digest.NewDigestVerifier(dgst)
   708  	if err != nil {
   709  		return nil, err
   710  	}
   711  	if _, err := verifier.Write(configJSON); err != nil {
   712  		return nil, err
   713  	}
   714  	if !verifier.Verified() {
   715  		err := fmt.Errorf("image config verification failed for digest %s", dgst)
   716  		logrus.Error(err)
   717  		return nil, err
   718  	}
   719  
   720  	return configJSON, nil
   721  }
   722  
   723  // schema2ManifestDigest computes the manifest digest, and, if pulling by
   724  // digest, ensures that it matches the requested digest.
   725  func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
   726  	_, canonical, err := mfst.Payload()
   727  	if err != nil {
   728  		return "", err
   729  	}
   730  
   731  	// If pull by digest, then verify the manifest digest.
   732  	if digested, isDigested := ref.(reference.Canonical); isDigested {
   733  		verifier, err := digest.NewDigestVerifier(digested.Digest())
   734  		if err != nil {
   735  			return "", err
   736  		}
   737  		if _, err := verifier.Write(canonical); err != nil {
   738  			return "", err
   739  		}
   740  		if !verifier.Verified() {
   741  			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
   742  			logrus.Error(err)
   743  			return "", err
   744  		}
   745  		return digested.Digest(), nil
   746  	}
   747  
   748  	return digest.FromBytes(canonical), nil
   749  }
   750  
    751  // allowV1Fallback checks if the error is a possible reason to fall back to v1
   752  // (even if confirmedV2 has been set already), and if so, wraps the error in
   753  // a fallbackError with confirmedV2 set to false. Otherwise, it returns the
   754  // error unmodified.
   755  func allowV1Fallback(err error) error {
   756  	switch v := err.(type) {
   757  	case errcode.Errors:
   758  		if len(v) != 0 {
   759  			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
   760  				return fallbackError{
   761  					err:         err,
   762  					confirmedV2: false,
   763  					transportOK: true,
   764  				}
   765  			}
   766  		}
   767  	case errcode.Error:
   768  		if shouldV2Fallback(v) {
   769  			return fallbackError{
   770  				err:         err,
   771  				confirmedV2: false,
   772  				transportOK: true,
   773  			}
   774  		}
   775  	case *url.Error:
   776  		if v.Err == auth.ErrNoBasicAuthCredentials {
   777  			return fallbackError{err: err, confirmedV2: false}
   778  		}
   779  	}
   780  
   781  	return err
   782  }
   783  
   784  func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
   785  	// If pull by digest, then verify the manifest digest. NOTE: It is
   786  	// important to do this first, before any other content validation. If the
   787  	// digest cannot be verified, don't even bother with those other things.
   788  	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
   789  		verifier, err := digest.NewDigestVerifier(digested.Digest())
   790  		if err != nil {
   791  			return nil, err
   792  		}
   793  		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
   794  			return nil, err
   795  		}
   796  		if !verifier.Verified() {
   797  			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
   798  			logrus.Error(err)
   799  			return nil, err
   800  		}
   801  	}
   802  	m = &signedManifest.Manifest
   803  
   804  	if m.SchemaVersion != 1 {
   805  		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
   806  	}
   807  	if len(m.FSLayers) != len(m.History) {
   808  		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
   809  	}
   810  	if len(m.FSLayers) == 0 {
   811  		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
   812  	}
   813  	return m, nil
   814  }
   815  
   816  // fixManifestLayers removes repeated layers from the manifest and checks the
   817  // correctness of the parent chain.
   818  func fixManifestLayers(m *schema1.Manifest) error {
   819  	imgs := make([]*image.V1Image, len(m.FSLayers))
   820  	for i := range m.FSLayers {
   821  		img := &image.V1Image{}
   822  
   823  		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
   824  			return err
   825  		}
   826  
   827  		imgs[i] = img
   828  		if err := v1.ValidateID(img.ID); err != nil {
   829  			return err
   830  		}
   831  	}
   832  
   833  	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
    834  		// Windows base layer can point to a base layer parent that is not in the manifest.
   835  		return errors.New("invalid parent ID in the base layer of the image")
   836  	}
   837  
    838  	// check for non-consecutive duplicate IDs so we return an error instead of deadlocking later
   839  	idmap := make(map[string]struct{})
   840  
   841  	var lastID string
   842  	for _, img := range imgs {
    843  		// skip consecutive duplicate IDs; those are handled later
   844  		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
   845  			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
   846  		}
   847  		lastID = img.ID
   848  		idmap[lastID] = struct{}{}
   849  	}
   850  
    851  	// iterate backwards so that removing items does not invalidate the remaining indexes
   852  	for i := len(imgs) - 2; i >= 0; i-- {
   853  		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
   854  			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
   855  			m.History = append(m.History[:i], m.History[i+1:]...)
   856  		} else if imgs[i].Parent != imgs[i+1].ID {
   857  			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
   858  		}
   859  	}
   860  
   861  	return nil
   862  }
   863  
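         // createDownloadFile creates the temporary file used to hold a layer blob while it is
         // being downloaded.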
   864  func createDownloadFile() (*os.File, error) {
   865  	return ioutil.TempFile("", "GetImageBlob")
   866  }