github.com/docker/docker@v299999999.0.0-20200612211812-aaf470eca7b5+incompatible/plugin/fetch_linux.go (about)

     1  package plugin
     2  
     3  import (
     4  	"context"
     5  	"io"
     6  	"net/http"
     7  	"time"
     8  
     9  	"github.com/containerd/containerd/content"
    10  	c8derrdefs "github.com/containerd/containerd/errdefs"
    11  	"github.com/containerd/containerd/images"
    12  	"github.com/containerd/containerd/remotes"
    13  	"github.com/containerd/containerd/remotes/docker"
    14  	"github.com/docker/distribution/reference"
    15  	"github.com/docker/docker/api/types"
    16  	progressutils "github.com/docker/docker/distribution/utils"
    17  	"github.com/docker/docker/pkg/chrootarchive"
    18  	"github.com/docker/docker/pkg/ioutils"
    19  	"github.com/docker/docker/pkg/progress"
    20  	"github.com/docker/docker/pkg/stringid"
    21  	digest "github.com/opencontainers/go-digest"
    22  	specs "github.com/opencontainers/image-spec/specs-go/v1"
    23  	"github.com/pkg/errors"
    24  	"github.com/sirupsen/logrus"
    25  )
    26  
// mediaTypePluginConfig is the media type of the plugin config blob in a
// plugin image manifest; several handlers below special-case it.
const mediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
    28  
    29  // setupProgressOutput sets up the passed in writer to stream progress.
    30  //
    31  // The passed in cancel function is used by the progress writer to signal callers that there
    32  // is an issue writing to the stream.
    33  //
    34  // The returned function is used to wait for the progress writer to be finished.
    35  // Call it to make sure the progress writer is done before returning from your function as needed.
    36  func setupProgressOutput(outStream io.Writer, cancel func()) (progress.Output, func()) {
    37  	var out progress.Output
    38  	f := func() {}
    39  
    40  	if outStream != nil {
    41  		ch := make(chan progress.Progress, 100)
    42  		out = progress.ChanOutput(ch)
    43  
    44  		ctx, retCancel := context.WithCancel(context.Background())
    45  		go func() {
    46  			progressutils.WriteDistributionProgress(cancel, outStream, ch)
    47  			retCancel()
    48  		}()
    49  
    50  		f = func() {
    51  			close(ch)
    52  			<-ctx.Done()
    53  		}
    54  	} else {
    55  		out = progress.DiscardOutput()
    56  	}
    57  	return out, f
    58  }
    59  
// fetch the content related to the passed in reference into the blob store and appends the provided images.Handlers
// There is no need to use remotes.FetchHandler since it already gets set
func (pm *Manager) fetch(ctx context.Context, ref reference.Named, auth *types.AuthConfig, out progress.Output, metaHeader http.Header, handlers ...images.Handler) (err error) {
	// We need to make sure we have a domain on the reference
	withDomain, err := reference.ParseNormalizedNamed(ref.String())
	if err != nil {
		return errors.Wrap(err, "error parsing plugin image reference")
	}

	// Make sure we can authenticate the request since the auth scope for plugin repos is different than a normal repo.
	ctx = docker.WithScope(ctx, scope(ref, false))

	// Make sure the fetch handler knows how to set a ref key for the plugin media type.
	// Without this the ref key is "unknown" and we see a nasty warning message in the logs
	ctx = remotes.WithMediaTypeKeyPrefix(ctx, mediaTypePluginConfig, "docker-plugin")

	resolver, err := pm.newResolver(ctx, nil, auth, metaHeader, false)
	if err != nil {
		return err
	}
	resolved, desc, err := resolver.Resolve(ctx, withDomain.String())
	if err != nil {
		// This is backwards compatible with older versions of the distribution registry.
		// The containerd client will add it's own accept header as a comma separated list of supported manifests.
		// This is perfectly fine, unless you are talking to an older registry which does not split the comma separated list,
		//   so it is never able to match a media type and it falls back to schema1 (yuck) and fails because our manifest the
		//   fallback does not support plugin configs...
		logrus.WithError(err).WithField("ref", withDomain).Debug("Error while resolving reference, falling back to backwards compatible accept header format")
		// Retry with one Accept value per header line instead of a combined list.
		headers := http.Header{}
		headers.Add("Accept", images.MediaTypeDockerSchema2Manifest)
		headers.Add("Accept", images.MediaTypeDockerSchema2ManifestList)
		headers.Add("Accept", specs.MediaTypeImageManifest)
		headers.Add("Accept", specs.MediaTypeImageIndex)
		// The fallback resolver's error is intentionally discarded: if the
		// fallback resolver cannot be created, err still holds the original
		// resolve error and is returned wrapped below.
		resolver, _ = pm.newResolver(ctx, nil, auth, headers, false)
		if resolver != nil {
			resolved, desc, err = resolver.Resolve(ctx, withDomain.String())
			if err != nil {
				logrus.WithError(err).WithField("ref", withDomain).Debug("Failed to resolve reference after falling back to backwards compatible accept header format")
			}
		}
		if err != nil {
			return errors.Wrap(err, "error resolving plugin reference")
		}
	}

	fetcher, err := resolver.Fetcher(ctx, resolved)
	if err != nil {
		return errors.Wrap(err, "error creating plugin image fetcher")
	}

	// Progress and fetch handlers run before any caller-supplied handlers so
	// content is in the blob store by the time the extra handlers see it.
	fp := withFetchProgress(pm.blobStore, out, ref)
	handlers = append([]images.Handler{fp, remotes.FetchHandler(pm.blobStore, fetcher)}, handlers...)
	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
		return err
	}
	return nil
}
   117  
   118  // applyLayer makes an images.HandlerFunc which applies a fetched image rootfs layer to a directory.
   119  //
   120  // TODO(@cpuguy83) This gets run sequentially after layer pull (makes sense), however
   121  // if there are multiple layers to fetch we may end up extracting layers in the wrong
   122  // order.
   123  func applyLayer(cs content.Store, dir string, out progress.Output) images.HandlerFunc {
   124  	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
   125  		switch desc.MediaType {
   126  		case
   127  			specs.MediaTypeImageLayer,
   128  			images.MediaTypeDockerSchema2Layer,
   129  			specs.MediaTypeImageLayerGzip,
   130  			images.MediaTypeDockerSchema2LayerGzip:
   131  		default:
   132  			return nil, nil
   133  		}
   134  
   135  		ra, err := cs.ReaderAt(ctx, desc)
   136  		if err != nil {
   137  			return nil, errors.Wrapf(err, "error getting content from content store for digest %s", desc.Digest)
   138  		}
   139  
   140  		id := stringid.TruncateID(desc.Digest.String())
   141  
   142  		rc := ioutils.NewReadCloserWrapper(content.NewReader(ra), ra.Close)
   143  		pr := progress.NewProgressReader(rc, out, desc.Size, id, "Extracting")
   144  		defer pr.Close()
   145  
   146  		if _, err := chrootarchive.ApplyLayer(dir, pr); err != nil {
   147  			return nil, errors.Wrapf(err, "error applying layer for digest %s", desc.Digest)
   148  		}
   149  		progress.Update(out, id, "Complete")
   150  		return nil, nil
   151  	}
   152  }
   153  
   154  func childrenHandler(cs content.Store) images.HandlerFunc {
   155  	ch := images.ChildrenHandler(cs)
   156  	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
   157  		switch desc.MediaType {
   158  		case mediaTypePluginConfig:
   159  			return nil, nil
   160  		default:
   161  			return ch(ctx, desc)
   162  		}
   163  	}
   164  }
   165  
// fetchMeta collects the digests of content observed while fetching a plugin
// image; it is populated by storeFetchMetadata and checked by
// validateFetchedMetadata.
type fetchMeta struct {
	// blobs are the digests of the rootfs layer blobs, in dispatch order.
	blobs    []digest.Digest
	// config is the digest of the plugin config blob.
	config   digest.Digest
	// manifest is the digest of the image manifest.
	manifest digest.Digest
}
   171  
   172  func storeFetchMetadata(m *fetchMeta) images.HandlerFunc {
   173  	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
   174  		switch desc.MediaType {
   175  		case
   176  			images.MediaTypeDockerSchema2LayerForeignGzip,
   177  			images.MediaTypeDockerSchema2Layer,
   178  			specs.MediaTypeImageLayer,
   179  			specs.MediaTypeImageLayerGzip:
   180  			m.blobs = append(m.blobs, desc.Digest)
   181  		case specs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
   182  			m.manifest = desc.Digest
   183  		case mediaTypePluginConfig:
   184  			m.config = desc.Digest
   185  		}
   186  		return nil, nil
   187  	}
   188  }
   189  
   190  func validateFetchedMetadata(md fetchMeta) error {
   191  	if md.config == "" {
   192  		return errors.New("fetched plugin image but plugin config is missing")
   193  	}
   194  	if md.manifest == "" {
   195  		return errors.New("fetched plugin image but manifest is missing")
   196  	}
   197  	return nil
   198  }
   199  
// withFetchProgress is a fetch handler which registers a descriptor with a progress
// reporter. For manifests it emits a one-shot "Pulling from" message; for layer
// blobs it spawns a goroutine that polls the content store's download status
// every 100ms and streams Downloading/Download complete updates to out.
func withFetchProgress(cs content.Store, out progress.Output, ref reference.Named) images.HandlerFunc {
	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		switch desc.MediaType {
		case specs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
			tn := reference.TagNameOnly(ref)
			// TagNameOnly returns a tagged reference, so this assertion is safe.
			tagged := tn.(reference.Tagged)
			progress.Messagef(out, tagged.Tag(), "Pulling from %s", reference.FamiliarName(ref))
			progress.Messagef(out, "", "Digest: %s", desc.Digest.String())
			return nil, nil
		case
			images.MediaTypeDockerSchema2LayerGzip,
			images.MediaTypeDockerSchema2Layer,
			specs.MediaTypeImageLayer,
			specs.MediaTypeImageLayerGzip:
			// Layer blobs fall through to the download-progress polling below.
		default:
			// Anything else (e.g. the plugin config) gets no progress reporting.
			return nil, nil
		}

		id := stringid.TruncateID(desc.Digest.String())

		// Blob already present in the content store: nothing to download.
		if _, err := cs.Info(ctx, desc.Digest); err == nil {
			out.WriteProgress(progress.Progress{ID: id, Action: "Already exists", LastUpdate: true})
			return nil, nil
		}

		progress.Update(out, id, "Waiting")

		// The fetch handler registers the in-flight download in the content
		// store under this ref key; we poll status with the same key.
		key := remotes.MakeRefKey(ctx, desc)

		go func() {
			// Create the timer stopped and drained so the Reset at the top of
			// the loop behaves like starting a fresh 100ms timer.
			timer := time.NewTimer(100 * time.Millisecond)
			if !timer.Stop() {
				<-timer.C
			}
			defer timer.Stop()

			var pulling bool
			var ctxErr error

			for {
				timer.Reset(100 * time.Millisecond)

				select {
				case <-ctx.Done():
					ctxErr = ctx.Err()
					// make sure we can still fetch from the content store
					// TODO: Might need to add some sort of timeout
					// Note: this replaces the captured ctx, so this branch can
					// only be taken once; subsequent iterations wait on the timer.
					ctx = context.Background()
				case <-timer.C:
				}

				s, err := cs.Status(ctx, key)
				if err != nil {
					if !c8derrdefs.IsNotFound(err) {
						// Unexpected store error: report it and stop polling.
						logrus.WithError(err).WithField("layerDigest", desc.Digest.String()).Error("Error looking up status of plugin layer pull")
						progress.Update(out, id, err.Error())
						return
					}

					// No in-flight status: either the download completed (the
					// blob is now in the store) or it has not started yet.
					if _, err := cs.Info(ctx, desc.Digest); err == nil {
						progress.Update(out, id, "Download complete")
						return
					}

					// Context was cancelled and the blob never arrived; give up.
					if ctxErr != nil {
						progress.Update(out, id, ctxErr.Error())
						return
					}

					continue
				}

				if !pulling {
					progress.Update(out, id, "Pulling fs layer")
					pulling = true
				}

				if s.Offset == s.Total {
					out.WriteProgress(progress.Progress{ID: id, Action: "Download complete", Current: s.Offset, LastUpdate: true})
					return
				}

				out.WriteProgress(progress.Progress{ID: id, Action: "Downloading", Current: s.Offset, Total: s.Total})
			}
		}()
		return nil, nil
	}
}