// Package containerimage adapts the docker distribution/layer machinery as a
// buildkit image source: it resolves image references (locally or remotely),
// pulls manifests/configs through containerd remotes, and materializes layers
// via the docker download manager into the buildkit cache.
package containerimage

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ctdreference "github.com/containerd/containerd/reference"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/remotes/docker/schema1"
	distreference "github.com/docker/distribution/reference"
	"github.com/docker/docker/distribution"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	pkgprogress "github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/reference"
	"github.com/moby/buildkit/cache"
	gw "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/auth"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/progress"
	"github.com/moby/buildkit/util/resolver"
	"github.com/moby/buildkit/util/tracing"
	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/time/rate"
)

// SourceOpt is options for creating the image source
type SourceOpt struct {
	SessionManager  *session.Manager                   // used to fetch registry credentials from the client session
	ContentStore    content.Store                      // containerd content store for manifests/configs/blobs
	CacheAccessor   cache.Accessor                     // buildkit cache for snapshot refs
	ReferenceStore  reference.Store                    // local tag -> digest store
	DownloadManager distribution.RootFSDownloadManager // docker layer download/extract pipeline
	MetadataStore   metadata.V2MetadataService         // diffID -> blobsum mapping cache
	ImageStore      image.Store                        // local image config store
	ResolverOpt     resolver.ResolveOptionsFunc        // per-ref registry resolver options
}

type imageSource struct {
	SourceOpt
	g             flightcontrol.Group // deduplicates concurrent remote resolves of the same ref
	resolverCache *resolverCache      // reuses resolvers (and their auth state) across requests
}

// NewSource creates a new image source
func NewSource(opt SourceOpt) (source.Source, error) {
	is := &imageSource{
		SourceOpt:     opt,
		resolverCache: newResolverCache(),
	}

	return is, nil
}

func (is *imageSource) ID() string {
	return source.DockerImageScheme
}

// getResolver returns a (possibly cached) remotes.Resolver for ref, wired to
// pull credentials from the session attached to ctx.
func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string) remotes.Resolver {
	if res := is.resolverCache.Get(ctx, ref); res != nil {
		return res
	}
	opt := docker.ResolverOptions{
		Client: tracing.DefaultClient,
	}
	if rfn != nil {
		opt = rfn(ref)
	}
	opt.Credentials = is.getCredentialsFromSession(ctx)
	r := docker.NewResolver(opt)
	r = is.resolverCache.Add(ctx, ref, r)
	return r
}

// getCredentialsFromSession returns a per-host credentials callback backed by
// the client session identified in ctx. With no session, it returns an
// anonymous (empty-credential) callback.
func (is *imageSource) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
	id := session.FromContext(ctx)
	if id == "" {
		// can be removed after containerd/containerd#2812
		return func(string) (string, string, error) {
			return "", "", nil
		}
	}
	return func(host string) (string, string, error) {
		// Bound the session lookup so a dead client cannot stall the pull.
		timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		caller, err := is.SessionManager.Get(timeoutCtx, id)
		if err != nil {
			return "", "", err
		}

		return auth.CredentialsFunc(tracing.ContextWithSpanFromContext(context.TODO(), ctx), caller)(host)
	}
}

// resolveLocal returns the raw image config JSON for refStr from the local
// reference and image stores, without touching the network.
func (is *imageSource) resolveLocal(refStr string) ([]byte, error) {
	ref, err := distreference.ParseNormalizedNamed(refStr)
	if err != nil {
		return nil, err
	}
	dgst, err := is.ReferenceStore.Get(ref)
	if err != nil {
		return nil, err
	}
	img, err := is.ImageStore.Get(image.ID(dgst))
	if err != nil {
		return nil, err
	}
	return img.RawJSON(), nil
}

// resolveRemote fetches the manifest digest and image config from the
// registry. Concurrent calls for the same ref are collapsed via flightcontrol.
func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform) (digest.Digest, []byte, error) {
	type t struct {
		dgst digest.Digest
		dt   []byte
	}
	res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
		dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref), is.ContentStore, platform)
		if err != nil {
			return nil, err
		}
		return &t{dgst: dgst, dt: dt}, nil
	})
	var typed *t
	if err != nil {
		return "", nil, err
	}
	typed = res.(*t)
	return typed.dgst, typed.dt, nil
}

// ResolveImageConfig resolves ref to a manifest digest and config blob,
// honoring the requested resolve mode (force-pull vs. prefer-local).
// Note: a purely local resolve returns an empty digest.
func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
	if err != nil {
		return "", nil, err
	}
	switch resolveMode {
	case source.ResolveModeForcePull:
		dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform)
		// TODO: pull should fallback to local in case of failure to allow offline behavior
		// the fallback doesn't work currently
		return dgst, dt, err
		/*
			if err == nil {
				return dgst, dt, err
			}
			// fallback to local
			dt, err = is.resolveLocal(ref)
			return "", dt, err
		*/

	case source.ResolveModeDefault:
		// default == prefer local, but in the future could be smarter
		fallthrough
	case source.ResolveModePreferLocal:
		dt, err := is.resolveLocal(ref)
		if err == nil {
			return "", dt, err
		}
		// fallback to remote
		return is.resolveRemote(ctx, ref, opt.Platform)
	}
	// should never happen
	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
}

// Resolve creates a puller instance for the given image identifier; actual
// resolution/pulling is deferred to the puller's CacheKey/Snapshot calls.
func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) {
	imageIdentifier, ok := id.(*source.ImageIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid image identifier %v", id)
	}

	platform := platforms.DefaultSpec()
	if imageIdentifier.Platform != nil {
		platform = *imageIdentifier.Platform
	}

	p := &puller{
		src:      imageIdentifier,
		is:       is,
		resolver: is.getResolver(ctx, is.ResolverOpt, imageIdentifier.Reference.String()),
		platform: platform,
	}
	return p, nil
}

// puller resolves and pulls a single image reference. Resolution state
// (desc/ref/config) is populated at most once via the sync.Once fields.
type puller struct {
	is               *imageSource
	resolveOnce      sync.Once
	resolveLocalOnce sync.Once
	src              *source.ImageIdentifier
	desc             ocispec.Descriptor // resolved manifest descriptor
	ref              string             // canonical reference returned by the resolver
	resolveErr       error
	resolver         remotes.Resolver
	config           []byte // image config JSON, once known
	platform         ocispec.Platform
}

// mainManifestKey derives a stable cache-key digest from the manifest digest
// plus the target platform.
// NOTE(review): the dgst parameter is ignored; p.desc.Digest is marshalled
// instead. Callers happen to pass p.desc.Digest so behavior matches, but
// confirm this is intentional.
func (p *puller) mainManifestKey(dgst digest.Digest, platform ocispec.Platform) (digest.Digest, error) {
	dt, err := json.Marshal(struct {
		Digest  digest.Digest
		OS      string
		Arch    string
		Variant string `json:",omitempty"`
	}{
		Digest:  p.desc.Digest,
		OS:      platform.OS,
		Arch:    platform.Architecture,
		Variant: platform.Variant,
	})
	if err != nil {
		return "", err
	}
	return digest.FromBytes(dt), nil
}

// resolveLocal populates p.desc from the content store (when the reference
// pins a digest) and p.config from the local image store (when the resolve
// mode allows local), without any network access. Runs at most once.
func (p *puller) resolveLocal() {
	p.resolveLocalOnce.Do(func() {
		dgst := p.src.Reference.Digest()
		if dgst != "" {
			info, err := p.is.ContentStore.Info(context.TODO(), dgst)
			if err == nil {
				p.ref = p.src.Reference.String()
				desc := ocispec.Descriptor{
					Size:   info.Size,
					Digest: dgst,
				}
				ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
				if err == nil {
					mt, err := imageutil.DetectManifestMediaType(ra)
					if err == nil {
						// Only accept the local blob once we know its media type.
						desc.MediaType = mt
						p.desc = desc
					}
				}
			}
		}

		if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
			dt, err := p.is.resolveLocal(p.src.Reference.String())
			if err == nil {
				p.config = dt
			}
		}
	})
}

// resolve performs the (at most once) remote resolution: manifest descriptor
// via the resolver, then the image config, reporting progress along the way.
// The first error is latched in p.resolveErr and returned on every call.
func (p *puller) resolve(ctx context.Context) error {
	p.resolveOnce.Do(func() {
		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())

		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
		if err != nil {
			p.resolveErr = err
			resolveProgressDone(err)
			return
		}

		// Skip the network round-trip if resolveLocal already filled these in.
		if p.desc.Digest == "" && p.config == nil {
			origRef, desc, err := p.resolver.Resolve(ctx, ref.String())
			if err != nil {
				p.resolveErr = err
				resolveProgressDone(err)
				return
			}

			p.desc = desc
			p.ref = origRef
		}

		// Schema 1 manifests cannot be resolved to an image config
		// since the conversion must take place after all the content
		// has been read.
		// It may be possible to have a mapping between schema 1 manifests
		// and the schema 2 manifests they are converted to.
		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ref, err := distreference.WithDigest(ref, p.desc.Digest)
			if err != nil {
				p.resolveErr = err
				resolveProgressDone(err)
				return
			}
			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), gw.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)})
			if err != nil {
				p.resolveErr = err
				resolveProgressDone(err)
				return
			}

			p.config = dt
		}
		resolveProgressDone(nil)
	})
	return p.resolveErr
}

// CacheKey returns the cache key for this image at the given index: a
// manifest-based key at index 0 when a digest is known, otherwise a key
// derived from the image config (see cacheKeyFromConfig). The boolean result
// reports whether the key is "final" (config-based keys are).
func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error) {
	p.resolveLocal()

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.desc.Digest, p.platform)
		if err != nil {
			return "", false, err
		}
		return dgst.String(), false, nil
	}

	if p.config != nil {
		return cacheKeyFromConfig(p.config).String(), true, nil
	}

	// Local information was insufficient; do the remote resolve and retry.
	if err := p.resolve(ctx); err != nil {
		return "", false, err
	}

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.desc.Digest, p.platform)
		if err != nil {
			return "", false, err
		}
		return dgst.String(), false, nil
	}

	return cacheKeyFromConfig(p.config).String(), true, nil
}

// Snapshot pulls the image (preferring a locally-stored copy) and returns an
// immutable cache ref for its unpacked rootfs. Progress for fetch and extract
// phases is reported through the progress writer attached to ctx.
func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
	p.resolveLocal()
	if err := p.resolve(ctx); err != nil {
		return nil, err
	}

	// Fast path: the exact image (by config digest) already exists locally.
	if p.config != nil {
		img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
		if err == nil {
			if len(img.RootFS.DiffIDs) == 0 {
				return nil, nil
			}
			ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(img.RootFS.ChainID()), cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
			if err != nil {
				return nil, err
			}
			return ref, nil
		}
	}

	ongoing := newJobs(p.ref)

	pctx, stopProgress := context.WithCancel(ctx)

	pw, _, ctx := progress.FromContext(ctx)
	defer pw.Close()

	// showProgress runs until pctx is cancelled; progressDone lets us wait for
	// its final flush before returning.
	progressDone := make(chan struct{})
	go func() {
		showProgress(pctx, ongoing, p.is.ContentStore, pw)
		close(progressDone)
	}()
	defer func() {
		<-progressDone
	}()

	fetcher, err := p.resolver.Fetcher(ctx, p.ref)
	if err != nil {
		stopProgress()
		return nil, err
	}

	// workaround for GCR bug that requires a request to manifest endpoint for authentication to work.
	// if current resolver has not used manifests do a dummy request.
	// in most cases resolver should be cached and extra request is not needed.
	ensureManifestRequested(ctx, p.resolver, p.ref)

	var (
		schema1Converter *schema1.Converter
		handlers         []images.Handler
	)
	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
		handlers = append(handlers, schema1Converter)

		// TODO: Optimize to do dispatch and integrate pulling with download manager,
		// leverage existing blob mapping and layer storage
	} else {

		// TODO: need a wrapper snapshot interface that combines content
		// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
		// or 2) cachemanager should manage the contentstore
		handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			// Only manifests/indexes/configs are fetched here; layer blobs go
			// through the download manager below.
			switch desc.MediaType {
			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
			default:
				return nil, images.ErrSkipDesc
			}
			ongoing.add(desc)
			return nil, nil
		}))

		// Get all the children for a descriptor
		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
		// Set any children labels for that content
		childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
		// Filter the children by the platform
		childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Default())

		handlers = append(handlers,
			remotes.FetchHandler(p.is.ContentStore, fetcher),
			childrenHandler,
		)
	}

	if err := images.Dispatch(ctx, images.Handlers(handlers...), p.desc); err != nil {
		stopProgress()
		return nil, err
	}
	defer stopProgress()

	if schema1Converter != nil {
		p.desc, err = schema1Converter.Convert(ctx)
		if err != nil {
			return nil, err
		}
	}

	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platforms.Default())
	if err != nil {
		return nil, err
	}

	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platforms.Default())
	if err != nil {
		return nil, err
	}

	dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
	if err != nil {
		return nil, err
	}

	var img ocispec.Image
	if err := json.Unmarshal(dt, &img); err != nil {
		return nil, err
	}

	if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
		return nil, errors.Errorf("invalid config for manifest")
	}

	// Bridge docker download-manager progress events onto buildkit progress,
	// rate-limiting "Extracting" updates to 10/s per layer.
	pchan := make(chan pkgprogress.Progress, 10)
	defer close(pchan)

	go func() {
		m := map[string]struct {
			st      time.Time
			limiter *rate.Limiter
		}{}
		for p := range pchan {
			if p.Action == "Extracting" {
				st, ok := m[p.ID]
				if !ok {
					st.st = time.Now()
					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
					m[p.ID] = st
				}
				var end *time.Time
				if p.LastUpdate || st.limiter.Allow() {
					if p.LastUpdate {
						tm := time.Now()
						end = &tm
					}
					pw.Write("extracting "+p.ID, progress.Status{
						Action:    "extract",
						Started:   &st.st,
						Completed: end,
					})
				}
			}
		}
	}()

	if len(mfst.Layers) == 0 {
		return nil, nil
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

	for i, desc := range mfst.Layers {
		ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:    desc,
			diffID:  layer.DiffID(img.RootFS.DiffIDs[i]),
			fetcher: fetcher,
			ref:     p.src.Reference,
			is:      p.is,
		})
	}

	defer func() {
		<-progressDone
		// Layer blobs were only staged in the content store for the download
		// manager; remove the temporary copies once the pull is finished.
		for _, desc := range mfst.Layers {
			p.is.ContentStore.Delete(context.TODO(), desc.Digest)
		}
	}()

	r := image.NewRootFS()
	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
	stopProgress()
	if err != nil {
		return nil, err
	}

	ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
	release()
	if err != nil {
		return nil, err
	}

	// TODO: handle windows layers for cross platform builds

	if p.src.RecordType != "" && cache.GetRecordType(ref) == "" {
		if err := cache.SetRecordType(ref, p.src.RecordType); err != nil {
			ref.Release(context.TODO())
			return nil, err
		}
	}

	return ref, nil
}

// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
// layerDescriptor adapts a single manifest layer to the docker
// xfer.DownloadDescriptor interface, fetching through the containerd fetcher
// and staging the blob in the content store.
type layerDescriptor struct {
	is      *imageSource
	fetcher remotes.Fetcher
	desc    ocispec.Descriptor
	diffID  layer.DiffID
	ref     ctdreference.Spec
}

func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.desc.Digest.String()
}

func (ld *layerDescriptor) ID() string {
	return ld.desc.Digest.String()
}

func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}

// Download fetches the layer blob, writes it into the content store (which
// also verifies size/digest), and returns a reader over the stored blob.
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
	rc, err := ld.fetcher.Fetch(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}
	defer rc.Close()

	refKey := remotes.MakeRefKey(ctx, ld.desc)

	// Discard any previous partial write for this ref before starting fresh.
	ld.is.ContentStore.Abort(ctx, refKey)

	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
		ld.is.ContentStore.Abort(ctx, refKey)
		return nil, 0, err
	}

	ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}

	return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}

func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest))
}

func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
}

// showProgress polls the content store every 100ms and writes download/done
// status for every job in ongoing until ctx is cancelled, emitting one final
// pass after cancellation so terminal states are flushed.
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			done = true
		}

		resolved := "resolved"
		if !ongoing.isResolved() {
			resolved = "resolving"
		}
		statuses[ongoing.name] = statusInfo{
			Ref:    ongoing.name,
			Status: resolved,
		}

		actives := make(map[string]statusInfo)

		if !done {
			active, err := cs.ListStatuses(ctx)
			if err != nil {
				// log.G(ctx).WithError(err).Error("active check failed")
				continue
			}
			// update status of active entries!
			for _, active := range active {
				actives[active.Ref] = statusInfo{
					Ref:       active.Ref,
					Status:    "downloading",
					Offset:    active.Offset,
					Total:     active.Total,
					StartedAt: active.StartedAt,
					UpdatedAt: active.UpdatedAt,
				}
			}
		}

		// now, update the items in jobs that are not in active
		for _, j := range ongoing.jobs() {
			refKey := remotes.MakeRefKey(ctx, j.Descriptor)
			if a, ok := actives[refKey]; ok {
				started := j.started
				pw.Write(j.Digest.String(), progress.Status{
					Action:  a.Status,
					Total:   int(a.Total),
					Current: int(a.Offset),
					Started: &started,
				})
				continue
			}

			if !j.done {
				info, err := cs.Info(context.TODO(), j.Digest)
				if err != nil {
					if errdefs.IsNotFound(err) {
						// pw.Write(j.Digest.String(), progress.Status{
						// 	Action: "waiting",
						// })
						continue
					}
				} else {
					j.done = true
				}

				// NOTE(review): when done==true and cs.Info failed with a
				// non-NotFound error, info is the zero value here, so the
				// "done" status reports zero size/time — confirm acceptable.
				if done || j.done {
					started := j.started
					createdAt := info.CreatedAt
					pw.Write(j.Digest.String(), progress.Status{
						Action:    "done",
						Current:   int(info.Size),
						Total:     int(info.Size),
						Completed: &createdAt,
						Started:   &started,
					})
				}
			}
		}
		if done {
			return
		}
	}
}

// jobs provides a way of identifying the download keys for a particular task
// encountering during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
	name     string
	added    map[digest.Digest]*job
	mu       sync.Mutex
	resolved bool
}

// job tracks progress state for one descriptor being pulled.
type job struct {
	ocispec.Descriptor
	done    bool
	started time.Time
}

func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: make(map[digest.Digest]*job),
	}
}

// add registers desc for progress tracking; duplicates are ignored.
func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()

	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.added[desc.Digest] = &job{
		Descriptor: desc,
		started:    time.Now(),
	}
}

// jobs returns a snapshot of all tracked jobs.
func (j *jobs) jobs() []*job {
	j.mu.Lock()
	defer j.mu.Unlock()

	descs := make([]*job, 0, len(j.added))
	for _, j := range j.added {
		descs = append(descs, j)
	}
	return descs
}

func (j *jobs) isResolved() bool {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.resolved
}

// statusInfo is a point-in-time progress snapshot for one ref.
type statusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}

// oneOffProgress writes a "started" status for id immediately and returns a
// completion callback that stamps the end time and passes err through.
func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.FromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	pw.Write(id, st)
	return func(err error) error {
		// TODO: set error on status
		now := time.Now()
		st.Completed = &now
		pw.Write(id, st)
		pw.Close()
		return err
	}
}

// cacheKeyFromConfig returns a stable digest from image config. If image config
// is a known oci image we will use chainID of layers.
func cacheKeyFromConfig(dt []byte) digest.Digest {
	var img ocispec.Image
	err := json.Unmarshal(dt, &img)
	if err != nil {
		// Not parseable as an OCI image config; key on the raw bytes.
		return digest.FromBytes(dt)
	}
	if img.RootFS.Type != "layers" {
		return digest.FromBytes(dt)
	}
	return identity.ChainID(img.RootFS.DiffIDs)
}

// resolveModeToString is the equivalent of github.com/moby/buildkit/solver/llb.ResolveMode.String()
// FIXME: add String method on source.ResolveMode
func resolveModeToString(rm source.ResolveMode) string {
	switch rm {
	case source.ResolveModeDefault:
		return "default"
	case source.ResolveModeForcePull:
		return "pull"
	case source.ResolveModePreferLocal:
		return "local"
	}
	return ""
}

// resolverCache caches resolvers per (repository, session) pair for one
// minute, so auth state and the "manifest requested" counter are reused.
type resolverCache struct {
	mu sync.Mutex
	m  map[string]cachedResolver
}

// cachedResolver wraps a resolver and counts Resolve calls (see
// ensureManifestRequested).
type cachedResolver struct {
	timeout time.Time
	remotes.Resolver
	counter int64
}

func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
	atomic.AddInt64(&cr.counter, 1)
	return cr.Resolver.Resolve(ctx, ref)
}

// Add caches resolver under the repo+session key (or returns the existing
// entry) and returns the wrapped resolver to use.
// NOTE(review): when an entry already exists, the refreshed timeout is set
// only on the local copy and never written back to the map, so the cached
// entry still expires from its original deadline — confirm intended.
func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Resolver) remotes.Resolver {
	r.mu.Lock()
	defer r.mu.Unlock()

	ref = r.repo(ref) + "-" + session.FromContext(ctx)

	cr, ok := r.m[ref]
	cr.timeout = time.Now().Add(time.Minute)
	if ok {
		return &cr
	}

	cr.Resolver = resolver
	r.m[ref] = cr
	return &cr
}

// repo normalizes refStr to its repository name for use as a cache key;
// unparseable refs are used verbatim.
func (r *resolverCache) repo(refStr string) string {
	ref, err := distreference.ParseNormalizedNamed(refStr)
	if err != nil {
		return refStr
	}
	return ref.Name()
}

// Get returns the cached resolver for the repo+session key, or nil.
func (r *resolverCache) Get(ctx context.Context, ref string) remotes.Resolver {
	r.mu.Lock()
	defer r.mu.Unlock()

	ref = r.repo(ref) + "-" + session.FromContext(ctx)

	cr, ok := r.m[ref]
	if !ok {
		return nil
	}
	return &cr
}

// clean evicts all cache entries whose timeout has passed.
func (r *resolverCache) clean(now time.Time) {
	r.mu.Lock()
	for k, cr := range r.m {
		if now.After(cr.timeout) {
			delete(r.m, k)
		}
	}
	r.mu.Unlock()
}

// newResolverCache creates a resolverCache with a background goroutine that
// sweeps expired entries once a minute (runs for the process lifetime).
func newResolverCache() *resolverCache {
	rc := &resolverCache{
		m: map[string]cachedResolver{},
	}
	t := time.NewTicker(time.Minute)
	go func() {
		for {
			rc.clean(<-t.C)
		}
	}()
	return rc
}

// ensureManifestRequested fires a dummy Resolve if the cached resolver has
// never hit the manifest endpoint — see the GCR auth workaround in Snapshot.
func ensureManifestRequested(ctx context.Context, res remotes.Resolver, ref string) {
	cr, ok := res.(*cachedResolver)
	if !ok {
		return
	}
	if atomic.LoadInt64(&cr.counter) == 0 {
		res.Resolve(ctx, ref)
	}
}