// github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/engine/plugin/fetch_linux.go

package plugin

import (
	"context"
	"io"
	"net/http"
	"time"

	"github.com/containerd/containerd/content"
	c8derrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	progressutils "github.com/docker/docker/distribution/utils"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const mediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"

// setupProgressOutput sets up the passed in writer to stream progress.
//
// The passed in cancel function is used by the progress writer to signal callers that there
// is an issue writing to the stream.
//
// The returned function is used to wait for the progress writer to be finished.
// Call it before returning from your function to make sure the progress writer is done.
func setupProgressOutput(outStream io.Writer, cancel func()) (progress.Output, func()) {
	var out progress.Output
	f := func() {}

	if outStream != nil {
		ch := make(chan progress.Progress, 100)
		out = progress.ChanOutput(ch)

		ctx, retCancel := context.WithCancel(context.Background())
		go func() {
			progressutils.WriteDistributionProgress(cancel, outStream, ch)
			retCancel()
		}()

		f = func() {
			close(ch)
			<-ctx.Done()
		}
	} else {
		out = progress.DiscardOutput()
	}
	return out, f
}
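// Usage sketch (illustrative, not part of the original file): wiring setupProgressOutput
// to an output stream. The pullWithProgress function and the w parameter are hypothetical;
// the points being shown are that cancel is handed to the writer goroutine so a broken
// stream can abort the operation, and that the returned wait function must be called
// before returning so the progress channel is drained.
//
//	func pullWithProgress(ctx context.Context, w io.Writer) error {
//		ctx, cancel := context.WithCancel(ctx)
//		defer cancel()
//
//		// cancel is invoked by the writer goroutine if w becomes unwritable.
//		out, waitProgress := setupProgressOutput(w, cancel)
//		defer waitProgress() // close the channel and wait for the writer to drain it
//
//		progress.Messagef(out, "", "starting plugin pull")
//		// ... perform the fetch, reporting to out ...
//		return ctx.Err()
//	}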
// fetch pulls the content for the passed in reference into the blob store and dispatches
// the provided images.Handlers against it.
// There is no need to pass in a remotes.FetchHandler since one is already set up internally.
func (pm *Manager) fetch(ctx context.Context, ref reference.Named, auth *types.AuthConfig, out progress.Output, metaHeader http.Header, handlers ...images.Handler) (err error) {
	// We need to make sure we have a domain on the reference
	withDomain, err := reference.ParseNormalizedNamed(ref.String())
	if err != nil {
		return errors.Wrap(err, "error parsing plugin image reference")
	}

	// Make sure we can authenticate the request since the auth scope for plugin repos is different than a normal repo.
	ctx = docker.WithScope(ctx, scope(ref, false))

	// Make sure the fetch handler knows how to set a ref key for the plugin media type.
	// Without this the ref key is "unknown" and we see a nasty warning message in the logs.
	ctx = remotes.WithMediaTypeKeyPrefix(ctx, mediaTypePluginConfig, "docker-plugin")

	resolver, err := pm.newResolver(ctx, nil, auth, metaHeader, false)
	if err != nil {
		return err
	}
	resolved, desc, err := resolver.Resolve(ctx, withDomain.String())
	if err != nil {
		// This fallback is backwards compatible with older versions of the distribution registry.
		// The containerd client adds its own Accept header as a comma-separated list of supported
		// manifest media types. This is perfectly fine, unless you are talking to an older registry
		// which does not split the comma-separated list, so it is never able to match a media type.
		// It then falls back to schema1 (yuck), which fails because the schema1 fallback does not
		// support plugin configs...
		logrus.WithError(err).WithField("ref", withDomain).Debug("Error while resolving reference, falling back to backwards compatible accept header format")
		headers := http.Header{}
		headers.Add("Accept", images.MediaTypeDockerSchema2Manifest)
		headers.Add("Accept", images.MediaTypeDockerSchema2ManifestList)
		headers.Add("Accept", specs.MediaTypeImageManifest)
		headers.Add("Accept", specs.MediaTypeImageIndex)
		resolver, _ = pm.newResolver(ctx, nil, auth, headers, false)
		if resolver != nil {
			resolved, desc, err = resolver.Resolve(ctx, withDomain.String())
			if err != nil {
				logrus.WithError(err).WithField("ref", withDomain).Debug("Failed to resolve reference after falling back to backwards compatible accept header format")
			}
		}
		if err != nil {
			return errors.Wrap(err, "error resolving plugin reference")
		}
	}

	fetcher, err := resolver.Fetcher(ctx, resolved)
	if err != nil {
		return errors.Wrap(err, "error creating plugin image fetcher")
	}

	fp := withFetchProgress(pm.blobStore, out, ref)
	handlers = append([]images.Handler{fp, remotes.FetchHandler(pm.blobStore, fetcher)}, handlers...)
	return images.Dispatch(ctx, images.Handlers(handlers...), nil, desc)
}
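// Sketch (illustrative, not part of the original file): the effect of the
// remotes.WithMediaTypeKeyPrefix call inside fetch. containerd derives its ingest ref keys
// from a descriptor's media type, and the registered prefix keeps plugin config blobs from
// being keyed as "unknown". dgst here is a hypothetical digest.Digest value.
//
//	ctx = remotes.WithMediaTypeKeyPrefix(ctx, mediaTypePluginConfig, "docker-plugin")
//	key := remotes.MakeRefKey(ctx, specs.Descriptor{
//		MediaType: mediaTypePluginConfig,
//		Digest:    dgst,
//	})
//	// key is now "docker-plugin-<digest>" rather than "unknown-<digest>"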
// applyLayer makes an images.HandlerFunc which applies a fetched image rootfs layer to a directory.
//
// TODO(@cpuguy83): This gets run sequentially after layer pull (which makes sense), however
// if there are multiple layers to fetch we may end up extracting layers in the wrong
// order.
func applyLayer(cs content.Store, dir string, out progress.Output) images.HandlerFunc {
	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		switch desc.MediaType {
		case
			specs.MediaTypeImageLayer,
			images.MediaTypeDockerSchema2Layer,
			specs.MediaTypeImageLayerGzip,
			images.MediaTypeDockerSchema2LayerGzip:
		default:
			return nil, nil
		}

		ra, err := cs.ReaderAt(ctx, desc)
		if err != nil {
			return nil, errors.Wrapf(err, "error getting content from content store for digest %s", desc.Digest)
		}

		id := stringid.TruncateID(desc.Digest.String())

		rc := ioutils.NewReadCloserWrapper(content.NewReader(ra), ra.Close)
		pr := progress.NewProgressReader(rc, out, desc.Size, id, "Extracting")
		defer pr.Close()

		if _, err := chrootarchive.ApplyLayer(dir, pr); err != nil {
			return nil, errors.Wrapf(err, "error applying layer for digest %s", desc.Digest)
		}
		progress.Update(out, id, "Complete")
		return nil, nil
	}
}

// childrenHandler wraps images.ChildrenHandler so that plugin configs, which have no
// children, are not walked any further.
func childrenHandler(cs content.Store) images.HandlerFunc {
	ch := images.ChildrenHandler(cs)
	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		switch desc.MediaType {
		case mediaTypePluginConfig:
			return nil, nil
		default:
			return ch(ctx, desc)
		}
	}
}

// fetchMeta collects the digests of everything fetched for a plugin image.
type fetchMeta struct {
	blobs    []digest.Digest
	config   digest.Digest
	manifest digest.Digest
}

// storeFetchMetadata makes an images.HandlerFunc which records the digest of each fetched
// descriptor into m according to its media type.
func storeFetchMetadata(m *fetchMeta) images.HandlerFunc {
	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		switch desc.MediaType {
		case
			images.MediaTypeDockerSchema2LayerForeignGzip,
			images.MediaTypeDockerSchema2Layer,
			specs.MediaTypeImageLayer,
			specs.MediaTypeImageLayerGzip:
			m.blobs = append(m.blobs, desc.Digest)
		case specs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
			m.manifest = desc.Digest
		case mediaTypePluginConfig:
			m.config = desc.Digest
		}
		return nil, nil
	}
}

// validateFetchedMetadata makes sure a fetch actually produced a plugin config and a manifest.
func validateFetchedMetadata(md fetchMeta) error {
	if md.config == "" {
		return errors.New("fetched plugin image but plugin config is missing")
	}
	if md.manifest == "" {
		return errors.New("fetched plugin image but manifest is missing")
	}
	return nil
}
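// Usage sketch (illustrative, not part of the original file): how the handlers in this
// file compose with fetch. pm, ctx, ref, authConfig, out, and tmpDir are assumed to be
// in scope; tmpDir is a hypothetical rootfs staging directory. storeFetchMetadata and
// childrenHandler run on every descriptor, applyLayer extracts only layer blobs, and
// validateFetchedMetadata checks the result afterwards.
//
//	var md fetchMeta
//	handlers := []images.Handler{
//		storeFetchMetadata(&md),
//		childrenHandler(pm.blobStore),
//		applyLayer(pm.blobStore, tmpDir, out),
//	}
//	if err := pm.fetch(ctx, ref, authConfig, out, nil, handlers...); err != nil {
//		return err
//	}
//	if err := validateFetchedMetadata(md); err != nil {
//		return err
//	}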
// withFetchProgress is a fetch handler which reports pull progress for manifest and
// layer descriptors to the provided progress output.
func withFetchProgress(cs content.Store, out progress.Output, ref reference.Named) images.HandlerFunc {
	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		switch desc.MediaType {
		case specs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
			// TagNameOnly adds the default tag if ref is untagged, so the assertion is safe.
			tn := reference.TagNameOnly(ref)
			tagged := tn.(reference.Tagged)
			progress.Messagef(out, tagged.Tag(), "Pulling from %s", reference.FamiliarName(ref))
			progress.Messagef(out, "", "Digest: %s", desc.Digest.String())
			return nil, nil
		case
			images.MediaTypeDockerSchema2LayerGzip,
			images.MediaTypeDockerSchema2Layer,
			specs.MediaTypeImageLayer,
			specs.MediaTypeImageLayerGzip:
			// Layer blobs continue to the progress polling below.
		default:
			return nil, nil
		}

		id := stringid.TruncateID(desc.Digest.String())

		if _, err := cs.Info(ctx, desc.Digest); err == nil {
			out.WriteProgress(progress.Progress{ID: id, Action: "Already exists", LastUpdate: true})
			return nil, nil
		}

		progress.Update(out, id, "Waiting")

		key := remotes.MakeRefKey(ctx, desc)

		go func() {
			timer := time.NewTimer(100 * time.Millisecond)
			if !timer.Stop() {
				<-timer.C
			}
			defer timer.Stop()

			var pulling bool
			var ctxErr error

			for {
				timer.Reset(100 * time.Millisecond)

				select {
				case <-ctx.Done():
					ctxErr = ctx.Err()
					// make sure we can still fetch from the content store
					// TODO: Might need to add some sort of timeout
					ctx = context.Background()
				case <-timer.C:
				}

				s, err := cs.Status(ctx, key)
				if err != nil {
					if !c8derrdefs.IsNotFound(err) {
						logrus.WithError(err).WithField("layerDigest", desc.Digest.String()).Error("Error looking up status of plugin layer pull")
						progress.Update(out, id, err.Error())
						return
					}

					if _, err := cs.Info(ctx, desc.Digest); err == nil {
						progress.Update(out, id, "Download complete")
						return
					}

					if ctxErr != nil {
						progress.Update(out, id, ctxErr.Error())
						return
					}

					continue
				}

				if !pulling {
					progress.Update(out, id, "Pulling fs layer")
					pulling = true
				}

				if s.Offset == s.Total {
					out.WriteProgress(progress.Progress{ID: id, Action: "Download complete", Current: s.Offset, LastUpdate: true})
					return
				}

				out.WriteProgress(progress.Progress{ID: id, Action: "Downloading", Current: s.Offset, Total: s.Total})
			}
		}()
		return nil, nil
	}
}
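// Sketch (illustrative, not part of the original file): the goroutine above polls the
// content store's ingest status to drive the "Downloading" progress bar. A minimal
// standalone version of the same check, assuming cs (a content.Store), ctx, and key
// (from remotes.MakeRefKey) are in scope:
//
//	s, err := cs.Status(ctx, key)
//	switch {
//	case err == nil:
//		// An active ingest exists; s.Offset and s.Total report bytes downloaded so far.
//	case c8derrdefs.IsNotFound(err):
//		// No active ingest: the download either has not started yet or was already
//		// committed to the store, which is why the loop above double-checks cs.Info.
//	default:
//		// A real error; report it and stop polling.
//	}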