github.com/endophage/docker@v1.4.2-0.20161027011718-242853499895/distribution/push_v2.go

package distribution

import (
	"errors"
	"fmt"
	"io"
	"runtime"
	"sort"
	"strings"
	"sync"

	"golang.org/x/net/context"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	distreference "github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
)

const (
	smallLayerMaximumSize  = 100 * (1 << 10) // 100KB
	middleLayerMaximumSize = 10 * (1 << 20)  // 10MB
)

// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
	Tag    string
	Digest digest.Digest
	Size   int
}

type v2Pusher struct {
	v2MetadataService metadata.V2MetadataService
	ref               reference.Named
	endpoint          registry.APIEndpoint
	repoInfo          *registry.RepositoryInfo
	config            *ImagePushConfig
	repo              distribution.Repository

	// pushState is state built by the Upload functions.
	pushState pushState
}

type pushState struct {
	sync.Mutex
	// remoteLayers is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers. It is also used to fill in digest and size
	// information when building the manifest.
	remoteLayers map[layer.DiffID]distribution.Descriptor
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

func (p *v2Pusher) Push(ctx context.Context) (err error) {
	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)

	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pushV2Repository(ctx); err != nil {
		if continueOnError(err) {
			return fallbackError{
				err:         err,
				confirmedV2: p.pushState.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
		imageID, err := p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return fmt.Errorf("tag does not exist: %s", p.ref.String())
		}

		return p.pushV2Tag(ctx, namedTagged, imageID)
	}

	if !reference.IsNameOnly(p.ref) {
		return errors.New("cannot push a digest reference")
	}

	// Push all tags
	pushed := 0
	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
			pushed++
			if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil {
				return err
			}
		}
	}

	if pushed == 0 {
		return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
	}

	return nil
}

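// pushV2Tag pushes the image identified by id under the given tag. It uploads
// any layers that are not yet present in the target repository and then puts
// the manifest, preferring the schema2 format and falling back to schema1 if
// the registry rejects it (the fallback is skipped on Windows).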
func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
	logrus.Debugf("Pushing repository: %s", ref.String())

	img, err := p.config.ImageStore.Get(image.IDFromDigest(id))
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
	}

	var l layer.Layer

	topLayerID := img.RootFS.ChainID()
	if topLayerID == "" {
		l = layer.EmptyLayer
	} else {
		l, err = p.config.LayerStore.Get(topLayerID)
		if err != nil {
			return fmt.Errorf("failed to get top layer from image: %v", err)
		}
		defer layer.ReleaseAndLog(p.config.LayerStore, l)
	}

	hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
	if err != nil {
		return fmt.Errorf("failed to compute hmac key of auth config: %v", err)
	}

	var descriptors []xfer.UploadDescriptor

	descriptorTemplate := v2PushDescriptor{
		v2MetadataService: p.v2MetadataService,
		hmacKey:           hmacKey,
		repoInfo:          p.repoInfo,
		ref:               p.ref,
		repo:              p.repo,
		pushState:         &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.
	for i := 0; i < len(img.RootFS.DiffIDs); i++ {
		descriptor := descriptorTemplate
		descriptor.layer = l
		descriptor.checkedDigests = make(map[digest.Digest]struct{})
		descriptors = append(descriptors, &descriptor)

		l = l.Parent()
	}

	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

	// Try schema2 first
	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON())
	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
	if err != nil {
		return err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
		if runtime.GOOS == "windows" {
			logrus.Warnf("failed to upload schema2 manifest: %v", err)
			return err
		}

		logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

		manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag())
		if err != nil {
			return err
		}
		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON())
		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
		if err != nil {
			return err
		}

		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
			return err
		}
	}

	var canonicalManifest []byte

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		canonicalManifest = v.Canonical
	case *schema2.DeserializedManifest:
		_, canonicalManifest, err = v.Payload()
		if err != nil {
			return err
		}
	}

	manifestDigest := digest.FromBytes(canonicalManifest)
	progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))

	if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
		return err
	}

	// Signal digest to the trust client so it can sign the
	// push, if appropriate.
	progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)})

	return nil
}

func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
	// descriptors is in reverse order; iterate backwards to get references
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return nil, err
		}
	}

	return builder.Build(ctx)
}

type v2PushDescriptor struct {
	layer             layer.Layer
	v2MetadataService metadata.V2MetadataService
	hmacKey           []byte
	repoInfo          reference.Named
	ref               reference.Named
	repo              distribution.Repository
	pushState         *pushState
	remoteDescriptor  distribution.Descriptor
	// a set of digests whose presence has been checked in a target repository
	checkedDigests map[digest.Digest]struct{}
}

func (pd *v2PushDescriptor) Key() string {
	return "v2push:" + pd.ref.FullName() + " " + pd.layer.DiffID().String()
}

func (pd *v2PushDescriptor) ID() string {
	return stringid.TruncateID(pd.layer.DiffID().String())
}

func (pd *v2PushDescriptor) DiffID() layer.DiffID {
	return pd.layer.DiffID()
}

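// Upload pushes a single layer. It skips foreign layers, reuses descriptors
// already known to exist in the target repository, attempts cross-repository
// blob mounts from other repositories on the same registry, performs a limited
// number of existence checks against known digests, and only then falls back
// to uploading the compressed layer data.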
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if fs, ok := pd.layer.(distribution.Describable); ok {
		if d := fs.Descriptor(); len(d.URLs) > 0 {
			progress.Update(progressOutput, pd.ID(), "Skipped foreign layer")
			return d, nil
		}
	}

	diffID := pd.DiffID()

	pd.pushState.Lock()
	if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return descriptor, nil
	}
	pd.pushState.Unlock()

	maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer)

	// Do we have any metadata associated with this layer's DiffID?
	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
	if err == nil {
		// check for blob existence in the target repository if we have a mapping for it
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, false, 1, v2Metadata)
		if exists || err != nil {
			return descriptor, err
		}
	}

	// If the digest was empty or not saved, or if the blob does not exist on
	// the remote repository, then push the blob.
	bs := pd.repo.Blobs(ctx)

	var layerUpload distribution.BlobWriter

	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
	candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata)
	for _, mountCandidate := range candidates {
		logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
		createOpts := []distribution.BlobCreateOption{}

		if len(mountCandidate.SourceRepository) > 0 {
			namedRef, err := reference.WithName(mountCandidate.SourceRepository)
			if err != nil {
				logrus.Errorf("failed to parse source repository reference %v: %v", mountCandidate.SourceRepository, err)
				pd.v2MetadataService.Remove(mountCandidate)
				continue
			}

			// TODO (brianbland): We need to construct a reference where the Name is
			// only the full remote name, so clean this up when distribution has a
			// richer reference package
			remoteRef, err := distreference.WithName(namedRef.RemoteName())
			if err != nil {
				logrus.Errorf("failed to make remote reference out of %q: %v", namedRef.RemoteName(), err)
				continue
			}

			canonicalRef, err := distreference.WithDigest(remoteRef, mountCandidate.Digest)
			if err != nil {
				logrus.Errorf("failed to make canonical reference: %v", err)
				continue
			}

			createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
		}

		// send the layer
		lu, err := bs.Create(ctx, createOpts...)
		switch err := err.(type) {
		case nil:
			// noop
		case distribution.ErrBlobMounted:
			progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())

			err.Descriptor.MediaType = schema2.MediaTypeLayer

			pd.pushState.Lock()
			pd.pushState.confirmedV2 = true
			pd.pushState.remoteLayers[diffID] = err.Descriptor
			pd.pushState.Unlock()

			// Cache mapping from this layer's DiffID to the blobsum
			if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
				Digest:           err.Descriptor.Digest,
				SourceRepository: pd.repoInfo.FullName(),
			}); err != nil {
				return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
			}
			return err.Descriptor, nil
		default:
			logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
		}

		if len(mountCandidate.SourceRepository) > 0 &&
			(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
				len(mountCandidate.HMAC) == 0) {
			cause := "blob mount failure"
			if err != nil {
				cause = fmt.Sprintf("an error: %v", err.Error())
			}
			logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
			pd.v2MetadataService.Remove(mountCandidate)
		}

		if lu != nil {
			// cancel previous upload
			cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
			layerUpload = lu
		}
	}

	if maxExistenceChecks-len(pd.checkedDigests) > 0 {
		// do additional layer existence checks with other known digests if any
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata)
		if exists || err != nil {
			return descriptor, err
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)
	if layerUpload == nil {
		layerUpload, err = bs.Create(ctx)
		if err != nil {
			return distribution.Descriptor{}, retryOnError(err)
		}
	}
	defer layerUpload.Close()

	// upload the blob
	desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
	if err != nil {
		return desc, err
	}

	return desc, nil
}

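// SetRemoteDescriptor records the descriptor of this layer's blob as it exists
// in the remote repository once the upload manager resolves it; Descriptor
// exposes it so the manifest can be built from the pushed layers.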
func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
	pd.remoteDescriptor = descriptor
}

func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
	return pd.remoteDescriptor
}

func (pd *v2PushDescriptor) uploadUsingSession(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	layerUpload distribution.BlobWriter,
) (distribution.Descriptor, error) {
	arch, err := pd.layer.TarStream()
	if err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort
	size, _ := pd.layer.DiffSize()

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
	compressedReader, compressionDone := compress(reader)
	defer func() {
		reader.Close()
		<-compressionDone
	}()

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
		Digest:           pushDigest,
		SourceRepository: pd.repoInfo.FullName(),
	}); err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	desc := distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}

	pd.pushState.Lock()
	// If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol.
	pd.pushState.confirmedV2 = true
	pd.pushState.remoteLayers[diffID] = desc
	pd.pushState.Unlock()

	return desc, nil
}

// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the
// "v2Metadata" slice. If it finds one that the registry knows about, it returns the known digest and "true".
// If "checkOtherRepositories" is true, the stat is also performed with digests mapped to repositories other
// than the target one.
func (pd *v2PushDescriptor) layerAlreadyExists(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	checkOtherRepositories bool,
	maxExistenceCheckAttempts int,
	v2Metadata []metadata.V2Metadata,
) (desc distribution.Descriptor, exists bool, err error) {
	// filter the metadata
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.FullName() {
			continue
		}
		candidates = append(candidates, meta)
	}
	// sort the candidates by similarity
	sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates)

	digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata)
	// an array of unique blob digests ordered from the best mount candidates to worst
	layerDigests := []digest.Digest{}
	for i := 0; i < len(candidates); i++ {
		if len(layerDigests) >= maxExistenceCheckAttempts {
			break
		}
		meta := &candidates[i]
		if _, exists := digestToMetadata[meta.Digest]; exists {
			// keep reference just to the first mapping (the best mount candidate)
			continue
		}
		if _, exists := pd.checkedDigests[meta.Digest]; exists {
			// existence of this digest has already been tested
			continue
		}
		digestToMetadata[meta.Digest] = meta
		layerDigests = append(layerDigests, meta.Digest)
	}

attempts:
	for _, dgst := range layerDigests {
		meta := digestToMetadata[dgst]
		logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName())
		desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst)
		pd.checkedDigests[meta.Digest] = struct{}{}
		switch err {
		case nil:
			if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.FullName() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) {
				// cache mapping from this layer's DiffID to the blobsum
				if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
					Digest:           desc.Digest,
					SourceRepository: pd.repoInfo.FullName(),
				}); err != nil {
					return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err}
				}
			}
			desc.MediaType = schema2.MediaTypeLayer
			exists = true
			// break out of the loop, not just the switch, so that desc is not
			// overwritten by a later, possibly failing, stat
			break attempts
		case distribution.ErrBlobUnknown:
			if meta.SourceRepository == pd.repoInfo.FullName() {
				// remove the mapping to the target repository
				pd.v2MetadataService.Remove(*meta)
			}
		default:
			progress.Update(progressOutput, pd.ID(), "Image push failed")
			return desc, false, retryOnError(err)
		}
	}

	if exists {
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		pd.pushState.Lock()
		pd.pushState.remoteLayers[diffID] = desc
		pd.pushState.Unlock()
	}

	return desc, exists, nil
}

// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross repository mount attempts from
// source repositories of the target registry, the maximum number of layer existence checks performed on the
// target repository, and whether the existence check shall also be done with digests mapped to other
// repositories. The decision is based on layer size: the smaller the layer, the fewer attempts are made,
// because for small blobs the cost of an upload does not outweigh the latency of the extra round trips.
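// For example, with the constants above, a 50 MB layer is worth up to 4 mount attempts and 3 existence
// checks (including digests mapped to other repositories), a 1 MB layer gets 3 mount attempts and a single
// existence check against the target repository, and a 10 KB layer gets one attempt of each.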
func getMaxMountAndExistenceCheckAttempts(layer layer.Layer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
	size, err := layer.DiffSize()
	switch {
	// big blob
	case size > middleLayerMaximumSize:
		// first, attempt to mount the blob a few times
		// second, do a few existence checks with digests associated to any repository
		// then fall back to upload
		return 4, 3, true

	// middle sized blobs; if we could not get the size, assume we deal with a middle sized blob
	case size > smallLayerMaximumSize, err != nil:
		// first, attempt to mount blobs of average size a few times
		// second, try at most 1 existence check if there's an existing mapping to the target repository
		// then fall back to upload
		return 3, 1, false

	// small blobs, do a minimum number of checks
	default:
		return 1, 1, false
	}
}

// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry,
// ordered by likeness to the target repository and, among equals, from youngest to oldest. Only entries
// whose SourceRepository is in the same registry as repoInfo are considered, the target repository itself
// is excluded, and at most max items are returned.
func getRepositoryMountCandidates(
	repoInfo reference.Named,
	hmacKey []byte,
	max int,
	v2Metadata []metadata.V2Metadata,
) []metadata.V2Metadata {
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
		if err != nil || repoInfo.Hostname() != sourceRepo.Hostname() {
			continue
		}
		// target repository is not a viable candidate
		if meta.SourceRepository == repoInfo.FullName() {
			continue
		}
		candidates = append(candidates, meta)
	}

	sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
	if max >= 0 && len(candidates) > max {
		// select the youngest metadata
		candidates = candidates[:max]
	}

	return candidates
}

// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. Candidate "a" is
// preferred over "b":
//
//  1. if it was hashed using the same AuthConfig as the one used to authenticate to the target repository
//     and "b" was not
//  2. if more of its repository path components exactly match the path components of the target repository
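//
// For example, with a target repository of "docker.io/library/busybox" (so pathComponents is
// ["docker.io", "library", "busybox"]), numOfMatchingPathComponents returns 2 for
// "docker.io/library/ubuntu", 1 for "docker.io/other/ubuntu", and 0 for
// "registry.example.com/library/busybox"; the repository names here are illustrative only.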
type byLikeness struct {
	arr            []metadata.V2Metadata
	hmacKey        []byte
	pathComponents []string
}

func (bla byLikeness) Less(i, j int) bool {
	aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey)
	bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey)
	if aMacMatch != bMacMatch {
		return aMacMatch
	}
	aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents)
	bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents)
	return aMatch > bMatch
}
func (bla byLikeness) Swap(i, j int) {
	bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i]
}
func (bla byLikeness) Len() int { return len(bla.arr) }

func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) {
	// reverse the metadata array to shift the newest entries to the beginning
	for i := 0; i < len(marr)/2; i++ {
		marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i]
	}
	// keep equal entries ordered from the youngest to the oldest
	sort.Stable(byLikeness{
		arr:            marr,
		hmacKey:        hmacKey,
		pathComponents: getPathComponents(repoInfo.FullName()),
	})
}

// numOfMatchingPathComponents returns the number of path components in "pth" that exactly match "matchComponents".
func numOfMatchingPathComponents(pth string, matchComponents []string) int {
	pthComponents := getPathComponents(pth)
	i := 0
	for ; i < len(pthComponents) && i < len(matchComponents); i++ {
		if matchComponents[i] != pthComponents[i] {
			return i
		}
	}
	return i
}

func getPathComponents(path string) []string {
	// make sure to add docker.io/ prefix to the path
	named, err := reference.ParseNamed(path)
	if err == nil {
		path = named.FullName()
	}
	return strings.Split(path, "/")
}

func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) {
	if layerUpload != nil {
		logrus.Debugf("cancelling upload of blob %s", dgst)
		err := layerUpload.Cancel(ctx)
		if err != nil {
			logrus.Warnf("failed to cancel upload: %v", err)
		}
	}
}