github.com/tonistiigi/docker@v0.10.1-0.20240229224939-974013b0dc6a/distribution/push_v2.go

package distribution // import "github.com/docker/docker/distribution"

import (
	"context"
	"fmt"
	"io"
	"os"
	"runtime"
	"sort"
	"strings"
	"sync"

	"github.com/containerd/log"
	"github.com/distribution/reference"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client"
	apitypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/docker/libtrust"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

const (
	smallLayerMaximumSize  = 100 * (1 << 10) // 100KB
	middleLayerMaximumSize = 10 * (1 << 20)  // 10MB
)

// newPusher creates a new pusher for pushing to a v2 registry.
// The parameters are passed through to the underlying pusher implementation for
// use during the actual push operation.
func newPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, config *ImagePushConfig) *pusher {
	return &pusher{
		metadataService: metadata.NewV2MetadataService(config.MetadataStore),
		ref:             ref,
		endpoint:        endpoint,
		repoInfo:        repoInfo,
		config:          config,
	}
}

type pusher struct {
	metadataService metadata.V2MetadataService
	ref             reference.Named
	endpoint        registry.APIEndpoint
	repoInfo        *registry.RepositoryInfo
	config          *ImagePushConfig
	repo            distribution.Repository

	// pushState is state built by the Upload functions.
	pushState pushState
}

type pushState struct {
	sync.Mutex
	// remoteLayers is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers. It is also used to fill in digest and size
	// information when building the manifest.
	remoteLayers map[layer.DiffID]distribution.Descriptor
	hasAuthInfo  bool
}

// TODO(tiborvass): have push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
func (p *pusher) push(ctx context.Context) (err error) {
	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)

	p.repo, err = newRepository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	p.pushState.hasAuthInfo = p.config.AuthConfig.RegistryToken != "" || (p.config.AuthConfig.Username != "" && p.config.AuthConfig.Password != "")
	if err != nil {
		log.G(ctx).Debugf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pushRepository(ctx); err != nil {
		if continueOnError(err, p.endpoint.Mirror) {
			return fallbackError{
				err:         err,
				transportOK: true,
			}
		}
	}
	return err
}

func (p *pusher) pushRepository(ctx context.Context) (err error) {
	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
		imageID, err := p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref))
		}

		return p.pushTag(ctx, namedTagged, imageID)
	}

	if !reference.IsNameOnly(p.ref) {
		return errors.New("cannot push a digest reference")
	}

	// Push all tags
	pushed := 0
	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
			pushed++
			if err := p.pushTag(ctx, namedTagged, association.ID); err != nil {
				return err
			}
		}
	}

	if pushed == 0 {
		return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name))
	}

	return nil
}

func (p *pusher) pushTag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
	log.G(ctx).Debugf("Pushing repository: %s", reference.FamiliarString(ref))

	imgConfig, err := p.config.ImageStore.Get(ctx, id)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err)
	}

	rootfs, err := rootFSFromConfig(imgConfig)
	if err != nil {
		return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err)
	}

	l, err := p.config.LayerStores.Get(rootfs.ChainID())
	if err != nil {
		return fmt.Errorf("failed to get top layer from image: %v", err)
	}
	defer l.Release()

	hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
	if err != nil {
		return fmt.Errorf("failed to compute hmac key of auth config: %v", err)
	}

	var descriptors []xfer.UploadDescriptor

	descriptorTemplate := pushDescriptor{
		metadataService: p.metadataService,
		hmacKey:         hmacKey,
		repoInfo:        p.repoInfo.Name,
		ref:             p.ref,
		endpoint:        p.endpoint,
		repo:            p.repo,
		pushState:       &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.
	for range rootfs.DiffIDs {
		descriptor := descriptorTemplate
		descriptor.layer = l
		descriptor.checkedDigests = make(map[digest.Digest]struct{})
		descriptors = append(descriptors, &descriptor)

		l = l.Parent()
	}

	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

	// Try schema2 first
	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig)
	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
	if err != nil {
		return err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
		if runtime.GOOS == "windows" {
			log.G(ctx).Warnf("failed to upload schema2 manifest: %v", err)
			return err
		}

		// This is a temporary environment variable used in CI to allow pushing
		// manifest v2 schema 1 images to test-registries used for testing *pulling*
		// these images.
		if os.Getenv("DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE") == "" {
			if err.Error() == "tag invalid" {
				msg := "[DEPRECATED] support for pushing manifest v2 schema1 images has been removed. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/"
				log.G(ctx).WithError(err).Error(msg)
				return errors.Wrap(err, msg)
			}
			return err
		}

		log.G(ctx).Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

		// Note: this fallback is deprecated, see log messages below
		manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag())
		if err != nil {
			return err
		}
		pk, err := libtrust.GenerateECP256PrivateKey()
		if err != nil {
			return errors.Wrap(err, "unexpected error generating private key")
		}
		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), pk, manifestRef, imgConfig)
		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
		if err != nil {
			return err
		}

		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
			return err
		}

		// schema2 failed but schema1 succeeded
		msg := fmt.Sprintf("[DEPRECATION NOTICE] support for pushing manifest v2 schema1 images will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", reference.Domain(ref))
		log.G(ctx).Warn(msg)
		progress.Message(p.config.ProgressOutput, "", msg)
	}

	var canonicalManifest []byte

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		canonicalManifest = v.Canonical
	case *schema2.DeserializedManifest:
		_, canonicalManifest, err = v.Payload()
		if err != nil {
			return err
		}
	}

	manifestDigest := digest.FromBytes(canonicalManifest)
	progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))

	if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
		return err
	}

	// Signal digest to the trust client so it can sign the
	// push, if appropriate.
	progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)})

	return nil
}

func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
	// descriptors is in reverse order; iterate backwards to get references
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*pushDescriptor)); err != nil {
			return nil, err
		}
	}

	return builder.Build(ctx)
}

type pushDescriptor struct {
	layer            PushLayer
	metadataService  metadata.V2MetadataService
	hmacKey          []byte
	repoInfo         reference.Named
	ref              reference.Named
	endpoint         registry.APIEndpoint
	repo             distribution.Repository
	pushState        *pushState
	remoteDescriptor distribution.Descriptor
	// a set of digests whose presence has been checked in a target repository
	checkedDigests map[digest.Digest]struct{}
}

func (pd *pushDescriptor) Key() string {
	return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String()
}

func (pd *pushDescriptor) ID() string {
	return stringid.TruncateID(pd.layer.DiffID().String())
}

func (pd *pushDescriptor) DiffID() layer.DiffID {
	return pd.layer.DiffID()
}

func (pd *pushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	// Skip foreign layers unless this registry allows nondistributable artifacts.
	if !pd.endpoint.AllowNondistributableArtifacts {
		if fs, ok := pd.layer.(distribution.Describable); ok {
			if d := fs.Descriptor(); len(d.URLs) > 0 {
				progress.Update(progressOutput, pd.ID(), "Skipped foreign layer")
				return d, nil
			}
		}
	}

	diffID := pd.DiffID()

	pd.pushState.Lock()
	if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return descriptor, nil
	}
	pd.pushState.Unlock()

	maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer)

	// Do we have any metadata associated with this layer's DiffID?
	metaData, err := pd.metadataService.GetMetadata(diffID)
	if err == nil {
		// check for blob existence in the target repository
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, metaData)
		if exists || err != nil {
			return descriptor, err
		}
	}

	// If the digest was empty or not saved, or if the blob does not exist in the
	// remote repository, then push the blob.
	bs := pd.repo.Blobs(ctx)

	var layerUpload distribution.BlobWriter

	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
	candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, metaData)
	isUnauthorizedError := false
	for _, mc := range candidates {
		mountCandidate := mc
		log.G(ctx).Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
		createOpts := []distribution.BlobCreateOption{}

		if len(mountCandidate.SourceRepository) > 0 {
			namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository)
			if err != nil {
				log.G(ctx).WithError(err).Errorf("failed to parse source repository reference %v", reference.FamiliarString(namedRef))
				_ = pd.metadataService.Remove(mountCandidate)
				continue
			}

			// Candidates are always under the same domain; create a remote
			// reference containing only the path, to use as the mount source.
			remoteRef, err := reference.WithName(reference.Path(namedRef))
			if err != nil {
				log.G(ctx).WithError(err).Errorf("failed to make remote reference out of %q", reference.Path(namedRef))
				continue
			}

			canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest)
			if err != nil {
				log.G(ctx).WithError(err).Error("failed to make canonical reference")
				continue
			}

			createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
		}

		// send the layer
		lu, err := bs.Create(ctx, createOpts...)
		switch err := err.(type) {
		case nil:
			// noop
		case distribution.ErrBlobMounted:
			progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())

			err.Descriptor.MediaType = schema2.MediaTypeLayer

			pd.pushState.Lock()
			pd.pushState.remoteLayers[diffID] = err.Descriptor
			pd.pushState.Unlock()

			// Cache mapping from this layer's DiffID to the blobsum
			if err := pd.metadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
				Digest:           err.Descriptor.Digest,
				SourceRepository: pd.repoInfo.Name(),
			}); err != nil {
				return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
			}
			return err.Descriptor, nil
		case errcode.Errors:
			for _, e := range err {
				switch e := e.(type) {
				case errcode.Error:
					if e.Code == errcode.ErrorCodeUnauthorized {
						// an unauthorized error indicates that the user does not have
						// permission to push the layer to the registry
						log.G(ctx).Debugln("failed to push layer to registry because unauthorized error")
						isUnauthorizedError = true
					}
				default:
				}
			}
		default:
			log.G(ctx).Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
		}

		// An unauthorized error combined with missing auth info means the user has no
		// permission to push the layer to the registry and is not logged in either; in
		// that case the cached candidate is kept. Otherwise the stale association is
		// removed from the cache.
		if len(mountCandidate.SourceRepository) > 0 &&
			!(isUnauthorizedError && !pd.pushState.hasAuthInfo) &&
			(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
				len(mountCandidate.HMAC) == 0) {
			cause := "blob mount failure"
			if err != nil {
				cause = fmt.Sprintf("an error: %v", err.Error())
			}
			log.G(ctx).Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
			_ = pd.metadataService.Remove(mountCandidate)
		}

		if lu != nil {
			// cancel previous upload
			cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
			layerUpload = lu
		}
	}

	if maxExistenceChecks-len(pd.checkedDigests) > 0 {
		// do additional layer existence checks with other known digests if any
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), metaData)
		if exists || err != nil {
			return descriptor, err
		}
	}

	log.G(ctx).Debugf("Pushing layer: %s", diffID)
	if layerUpload == nil {
		layerUpload, err = bs.Create(ctx)
		if err != nil {
			return distribution.Descriptor{}, retryOnError(err)
		}
	}
	defer layerUpload.Close()
	// upload the blob
	return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
}

func (pd *pushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
	pd.remoteDescriptor = descriptor
}

func (pd *pushDescriptor) Descriptor() distribution.Descriptor {
	return pd.remoteDescriptor
}

func (pd *pushDescriptor) uploadUsingSession(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	layerUpload distribution.BlobWriter,
) (distribution.Descriptor, error) {
	var reader io.ReadCloser

	contentReader, err := pd.layer.Open()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, pd.layer.Size(), pd.ID(), "Pushing")

	switch m := pd.layer.MediaType(); m {
	case schema2.MediaTypeUncompressedLayer:
		compressedReader, compressionDone := compress(reader)
		defer func(closer io.Closer) {
			closer.Close()
			<-compressionDone
		}(reader)
		reader = compressedReader
	case schema2.MediaTypeLayer:
	default:
		reader.Close()
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: fmt.Errorf("unsupported layer media type %s", m)}
	}

	digester := digest.Canonical.Digester()
	tee := io.TeeReader(reader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	reader.Close()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	log.G(ctx).Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.metadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
		Digest:           pushDigest,
		SourceRepository: pd.repoInfo.Name(),
	}); err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	desc := distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}

	pd.pushState.Lock()
	pd.pushState.remoteLayers[diffID] = desc
	pd.pushState.Unlock()

	return desc, nil
}

// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata"
// slice. If it finds one that the registry knows about, it returns the known digest and "true". If
// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository
// (not just the target one).
func (pd *pushDescriptor) layerAlreadyExists(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	checkOtherRepositories bool,
	maxExistenceCheckAttempts int,
	v2Metadata []metadata.V2Metadata,
) (desc distribution.Descriptor, exists bool, err error) {
	// filter the metadata
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() {
			continue
		}
		candidates = append(candidates, meta)
	}
	// sort the candidates by similarity
	sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates)

	digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata)
	// an array of unique blob digests ordered from the best mount candidates to worst
	layerDigests := []digest.Digest{}
	for i := 0; i < len(candidates); i++ {
		if len(layerDigests) >= maxExistenceCheckAttempts {
			break
		}
		meta := &candidates[i]
		if _, exists := digestToMetadata[meta.Digest]; exists {
			// keep reference just to the first mapping (the best mount candidate)
			continue
		}
		if _, exists := pd.checkedDigests[meta.Digest]; exists {
			// existence of this digest has already been tested
			continue
		}
		digestToMetadata[meta.Digest] = meta
		layerDigests = append(layerDigests, meta.Digest)
	}

attempts:
	for _, dgst := range layerDigests {
		meta := digestToMetadata[dgst]
		log.G(ctx).Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
		desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst)
		pd.checkedDigests[meta.Digest] = struct{}{}
		switch err {
		case nil:
			if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) {
				// cache mapping from this layer's DiffID to the blobsum
				if err := pd.metadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
					Digest:           desc.Digest,
					SourceRepository: pd.repoInfo.Name(),
				}); err != nil {
					return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err}
				}
			}
			desc.MediaType = schema2.MediaTypeLayer
			exists = true
			break attempts
		case distribution.ErrBlobUnknown:
			if meta.SourceRepository == pd.repoInfo.Name() {
				// remove the mapping to the target repository
				pd.metadataService.Remove(*meta)
			}
		default:
			log.G(ctx).WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
		}
	}

	if exists {
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		pd.pushState.Lock()
		pd.pushState.remoteLayers[diffID] = desc
		pd.pushState.Unlock()
	}

	return desc, exists, nil
}

// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross-repository mount attempts from
// source repositories of the target registry, the maximum number of layer existence checks performed on the
// target repository, and whether the checks shall also be done with digests mapped to different repositories.
// The decision is based on layer size. The smaller the layer, the fewer attempts shall be made, because the
// cost of the upload does not outweigh the latency of the extra checks.
func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
	size := layer.Size()
	switch {
	// big blob
	case size > middleLayerMaximumSize:
		// 1st attempt to mount the blob a few times
		// 2nd a few existence checks with digests associated to any repository
		// then fall back to upload
		return 4, 3, true

	// middle-sized blobs; if we could not get the size, assume we are dealing with a middle-sized blob
	case size > smallLayerMaximumSize:
		// 1st attempt to mount blobs of average size a few times
		// 2nd try at most 1 existence check if there's an existing mapping to the target repository
		// then fall back to upload
		return 3, 1, false

	// small blobs, do a minimum number of checks
	default:
		return 1, 1, false
	}
}

// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
// array is sorted from youngest to oldest. The resulting array will contain only metadata entries whose
// registry part of SourceRepository matches that of repoInfo.
func getRepositoryMountCandidates(
	repoInfo reference.Named,
	hmacKey []byte,
	max int,
	v2Metadata []metadata.V2Metadata,
) []metadata.V2Metadata {
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
		if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) {
			continue
		}
		// target repository is not a viable candidate
		if meta.SourceRepository == repoInfo.Name() {
			continue
		}
		candidates = append(candidates, meta)
	}

	sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
	if max >= 0 && len(candidates) > max {
		// select the youngest metadata
		candidates = candidates[:max]
	}

	return candidates
}

// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The
// candidate "a" is preferred over "b":
//
//  1. if it was hashed using the same AuthConfig as the one used to authenticate to the target repository
//     and "b" was not
//  2. if the number of its repository path components that exactly match the target repository's path
//     components is higher
type byLikeness struct {
	arr            []metadata.V2Metadata
	hmacKey        []byte
	pathComponents []string
}

func (bla byLikeness) Less(i, j int) bool {
	aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey)
	bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey)
	if aMacMatch != bMacMatch {
		return aMacMatch
	}
	aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents)
	bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents)
	return aMatch > bMatch
}

func (bla byLikeness) Swap(i, j int) {
	bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i]
}
func (bla byLikeness) Len() int { return len(bla.arr) }

func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) {
	// reverse the metadata array to shift the newest entries to the beginning
	for i := 0; i < len(marr)/2; i++ {
		marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i]
	}
	// keep equal entries ordered from the youngest to the oldest
	sort.Stable(byLikeness{
		arr:            marr,
		hmacKey:        hmacKey,
		pathComponents: getPathComponents(repoInfo.Name()),
	})
}

// numOfMatchingPathComponents returns the number of path components in "pth" that exactly match "matchComponents".
func numOfMatchingPathComponents(pth string, matchComponents []string) int {
	pthComponents := getPathComponents(pth)
	i := 0
	for ; i < len(pthComponents) && i < len(matchComponents); i++ {
		if matchComponents[i] != pthComponents[i] {
			return i
		}
	}
	return i
}

func getPathComponents(path string) []string {
	return strings.Split(path, "/")
}

func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) {
	if layerUpload != nil {
		log.G(ctx).Debugf("cancelling upload of blob %s", dgst)
		err := layerUpload.Cancel(ctx)
		if err != nil {
			log.G(ctx).Warnf("failed to cancel upload: %v", err)
		}
	}
}