github.com/fabiokung/docker@v0.11.2-0.20170222101415-4534dcd49497/distribution/push_v2.go

package distribution

import (
	"errors"
	"fmt"
	"io"
	"runtime"
	"sort"
	"strings"
	"sync"

	"golang.org/x/net/context"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
	apitypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/opencontainers/go-digest"
)

const (
	smallLayerMaximumSize  = 100 * (1 << 10) // 100KB
	middleLayerMaximumSize = 10 * (1 << 20)  // 10MB
)

type v2Pusher struct {
	v2MetadataService metadata.V2MetadataService
	ref               reference.Named
	endpoint          registry.APIEndpoint
	repoInfo          *registry.RepositoryInfo
	config            *ImagePushConfig
	repo              distribution.Repository

	// pushState is state built by the Upload functions.
	pushState pushState
}

type pushState struct {
	sync.Mutex
	// remoteLayers is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers. It is also used to fill in digest and size
	// information when building the manifest.
	remoteLayers map[layer.DiffID]distribution.Descriptor
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

func (p *v2Pusher) Push(ctx context.Context) (err error) {
	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)

	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pushV2Repository(ctx); err != nil {
		if continueOnError(err) {
			return fallbackError{
				err:         err,
				confirmedV2: p.pushState.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
		imageID, err := p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref))
		}

		return p.pushV2Tag(ctx, namedTagged, imageID)
	}

	if !reference.IsNameOnly(p.ref) {
		return errors.New("cannot push a digest reference")
	}

	// Push all tags
	pushed := 0
	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
			pushed++
			if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil {
				return err
			}
		}
	}

	if pushed == 0 {
		return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name))
	}

	return nil
}

func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
	logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref))

	imgConfig, err := p.config.ImageStore.Get(id)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err)
	}

	rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig)
	if err != nil {
		return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err)
	}

	l, err := p.config.LayerStore.Get(rootfs.ChainID())
	if err != nil {
		return fmt.Errorf("failed to get top layer from image: %v", err)
	}
	defer l.Release()

	hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
	if err != nil {
		return fmt.Errorf("failed to compute hmac key of auth config: %v", err)
	}

	var descriptors []xfer.UploadDescriptor

	descriptorTemplate := v2PushDescriptor{
		v2MetadataService: p.v2MetadataService,
		hmacKey:           hmacKey,
		repoInfo:          p.repoInfo.Name,
		ref:               p.ref,
		repo:              p.repo,
		pushState:         &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.
	for i := 0; i < len(rootfs.DiffIDs); i++ {
		descriptor := descriptorTemplate
		descriptor.layer = l
		descriptor.checkedDigests = make(map[digest.Digest]struct{})
		descriptors = append(descriptors, &descriptor)

		l = l.Parent()
	}

	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

	// Try schema2 first
	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig)
	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
	if err != nil {
		return err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
		if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 {
			logrus.Warnf("failed to upload schema2 manifest: %v", err)
			return err
		}

		logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

		manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag())
		if err != nil {
			return err
		}
		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig)
		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
		if err != nil {
			return err
		}

		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
			return err
		}
	}

	var canonicalManifest []byte

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		canonicalManifest = v.Canonical
	case *schema2.DeserializedManifest:
		_, canonicalManifest, err = v.Payload()
		if err != nil {
			return err
		}
	}

	manifestDigest := digest.FromBytes(canonicalManifest)
	progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))

	if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
		return err
	}

	// Signal digest to the trust client so it can sign the
	// push, if appropriate.
	progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)})

	return nil
}

func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
	// descriptors is in reverse order; iterate backwards to get references
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return nil, err
		}
	}

	return builder.Build(ctx)
}

type v2PushDescriptor struct {
	layer             PushLayer
	v2MetadataService metadata.V2MetadataService
	hmacKey           []byte
	repoInfo          reference.Named
	ref               reference.Named
	repo              distribution.Repository
	pushState         *pushState
	remoteDescriptor  distribution.Descriptor
	// a set of digests whose presence has been checked in a target repository
	checkedDigests map[digest.Digest]struct{}
}

func (pd *v2PushDescriptor) Key() string {
	return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String()
}

func (pd *v2PushDescriptor) ID() string {
	return stringid.TruncateID(pd.layer.DiffID().String())
}

func (pd *v2PushDescriptor) DiffID() layer.DiffID {
	return pd.layer.DiffID()
}

func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if fs, ok := pd.layer.(distribution.Describable); ok {
		if d := fs.Descriptor(); len(d.URLs) > 0 {
			progress.Update(progressOutput, pd.ID(), "Skipped foreign layer")
			return d, nil
		}
	}

	diffID := pd.DiffID()

	pd.pushState.Lock()
	if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return descriptor, nil
	}
	pd.pushState.Unlock()

	maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer)

	// Do we have any metadata associated with this layer's DiffID?
	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
	if err == nil {
		// check for blob existence in the target repository if we have a mapping with it
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, false, 1, v2Metadata)
		if exists || err != nil {
			return descriptor, err
		}
	}

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

	var layerUpload distribution.BlobWriter

	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
	candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata)
	for _, mountCandidate := range candidates {
		logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
		createOpts := []distribution.BlobCreateOption{}

		if len(mountCandidate.SourceRepository) > 0 {
			namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository)
			if err != nil {
				logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err)
				pd.v2MetadataService.Remove(mountCandidate)
				continue
			}

			// Candidates are always under the same domain; create a remote
			// reference with only the path to mount the blob from.
			remoteRef, err := reference.WithName(reference.Path(namedRef))
			if err != nil {
				logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err)
				continue
			}

			canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest)
			if err != nil {
				logrus.Errorf("failed to make canonical reference: %v", err)
				continue
			}

			createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
		}

		// send the layer
		lu, err := bs.Create(ctx, createOpts...)
		switch err := err.(type) {
		case nil:
			// noop
		case distribution.ErrBlobMounted:
			progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())

			err.Descriptor.MediaType = schema2.MediaTypeLayer

			pd.pushState.Lock()
			pd.pushState.confirmedV2 = true
			pd.pushState.remoteLayers[diffID] = err.Descriptor
			pd.pushState.Unlock()

			// Cache mapping from this layer's DiffID to the blobsum
			if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
				Digest:           err.Descriptor.Digest,
				SourceRepository: pd.repoInfo.Name(),
			}); err != nil {
				return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
			}
			return err.Descriptor, nil
		default:
			logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
		}

		if len(mountCandidate.SourceRepository) > 0 &&
			(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
				len(mountCandidate.HMAC) == 0) {
			cause := "blob mount failure"
			if err != nil {
				cause = fmt.Sprintf("an error: %v", err.Error())
			}
			logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
			pd.v2MetadataService.Remove(mountCandidate)
		}

		if lu != nil {
			// cancel previous upload
			cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
			layerUpload = lu
		}
	}

	if maxExistenceChecks-len(pd.checkedDigests) > 0 {
		// do additional layer existence checks with other known digests if any
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata)
		if exists || err != nil {
			return descriptor, err
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)
	if layerUpload == nil {
		layerUpload, err = bs.Create(ctx)
		if err != nil {
			return distribution.Descriptor{}, retryOnError(err)
		}
	}
	defer layerUpload.Close()

	// upload the blob
	desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
	if err != nil {
		return desc, err
	}

	return desc, nil
}

func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
	pd.remoteDescriptor = descriptor
}

func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
	return pd.remoteDescriptor
}

func (pd *v2PushDescriptor) uploadUsingSession(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	layerUpload distribution.BlobWriter,
) (distribution.Descriptor, error) {
	var reader io.ReadCloser

	contentReader, err := pd.layer.Open()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}
	size, _ := pd.layer.Size()

	reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing")

	switch m := pd.layer.MediaType(); m {
	case schema2.MediaTypeUncompressedLayer:
		compressedReader, compressionDone := compress(reader)
		defer func(closer io.Closer) {
			closer.Close()
			<-compressionDone
		}(reader)
		reader = compressedReader
	case schema2.MediaTypeLayer:
	default:
		reader.Close()
		return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m)
	}

	digester := digest.Canonical.Digester()
	tee := io.TeeReader(reader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	reader.Close()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
		Digest:           pushDigest,
		SourceRepository: pd.repoInfo.Name(),
	}); err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	desc := distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}

	pd.pushState.Lock()
	// If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol.
	pd.pushState.confirmedV2 = true
	pd.pushState.remoteLayers[diffID] = desc
	pd.pushState.Unlock()

	return desc, nil
}

// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the
// "v2Metadata" slice. If it finds one that the registry knows about, it returns the known digest and "true".
// If "checkOtherRepositories" is true, the stat will also be performed with digests mapped to any other
// repository (not just the target one).
func (pd *v2PushDescriptor) layerAlreadyExists(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	checkOtherRepositories bool,
	maxExistenceCheckAttempts int,
	v2Metadata []metadata.V2Metadata,
) (desc distribution.Descriptor, exists bool, err error) {
	// filter the metadata
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() {
			continue
		}
		candidates = append(candidates, meta)
	}
	// sort the candidates by similarity
	sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates)

	digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata)
	// an array of unique blob digests ordered from the best mount candidates to worst
	layerDigests := []digest.Digest{}
	for i := 0; i < len(candidates); i++ {
		if len(layerDigests) >= maxExistenceCheckAttempts {
			break
		}
		meta := &candidates[i]
		if _, exists := digestToMetadata[meta.Digest]; exists {
			// keep reference just to the first mapping (the best mount candidate)
			continue
		}
		if _, exists := pd.checkedDigests[meta.Digest]; exists {
			// existence of this digest has already been tested
			continue
		}
		digestToMetadata[meta.Digest] = meta
		layerDigests = append(layerDigests, meta.Digest)
	}

attempts:
	for _, dgst := range layerDigests {
		meta := digestToMetadata[dgst]
		logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
		desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst)
		pd.checkedDigests[meta.Digest] = struct{}{}
		switch err {
		case nil:
			if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) {
				// cache mapping from this layer's DiffID to the blobsum
				if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
					Digest:           desc.Digest,
					SourceRepository: pd.repoInfo.Name(),
				}); err != nil {
					return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err}
				}
			}
			desc.MediaType = schema2.MediaTypeLayer
			exists = true
			break attempts
		case distribution.ErrBlobUnknown:
			if meta.SourceRepository == pd.repoInfo.Name() {
				// remove the mapping to the target repository
				pd.v2MetadataService.Remove(*meta)
			}
		default:
			logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
		}
	}

	if exists {
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		pd.pushState.Lock()
		pd.pushState.remoteLayers[diffID] = desc
		pd.pushState.Unlock()
	}

	return desc, exists, nil
}

// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross-repository mount attempts (from
// other source repositories on the target registry), the maximum number of layer existence checks to perform
// against the target repository, and whether the existence checks shall also use digests mapped to other
// repositories. The decision is based on layer size: the smaller the layer, the fewer attempts are made,
// because the cost of an upload does not outweigh the latency of the extra checks.
func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
	size, err := layer.Size()
	switch {
	// big blob
	case size > middleLayerMaximumSize:
		// 1st, attempt to mount the blob a few times
		// 2nd, do a few existence checks with digests associated to any repository
		// then fall back to upload
		return 4, 3, true

	// middle sized blobs; if we could not get the size, assume we deal with a middle sized blob
	case size > smallLayerMaximumSize, err != nil:
		// 1st, attempt to mount blobs of average size a few times
		// 2nd, try at most 1 existence check if there's an existing mapping to the target repository
		// then fall back to upload
		return 3, 1, false

	// small blobs, do a minimum number of checks
	default:
		return 1, 1, false
	}
}

// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
// array is sorted by likeness to repoInfo and, within equal likeness, from youngest to oldest. The resulting
// array contains only metadata entries whose registry part of SourceRepository matches that of repoInfo;
// the target repository itself is excluded.
func getRepositoryMountCandidates(
	repoInfo reference.Named,
	hmacKey []byte,
	max int,
	v2Metadata []metadata.V2Metadata,
) []metadata.V2Metadata {
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
		if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) {
			continue
		}
		// target repository is not a viable candidate
		if meta.SourceRepository == repoInfo.Name() {
			continue
		}
		candidates = append(candidates, meta)
	}

	sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
	if max >= 0 && len(candidates) > max {
		// select the youngest metadata
		candidates = candidates[:max]
	}

	return candidates
}
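
// exampleMountCandidateSelection is an illustrative sketch (not part of the
// upstream file) of how getRepositoryMountCandidates narrows down the known
// metadata: entries from other registries and entries pointing at the target
// repository itself are dropped, the rest are ordered by likeness, and the
// result is capped at "max". All repository names and digests below are
// hypothetical placeholders.
func exampleMountCandidateSelection() []metadata.V2Metadata {
	target, _ := reference.ParseNormalizedNamed("registry.example.com/team/app")
	known := []metadata.V2Metadata{
		{Digest: "sha256:aaaa", SourceRepository: "registry.example.com/team/app"},  // target repository: excluded
		{Digest: "sha256:bbbb", SourceRepository: "docker.io/library/app"},          // different registry: excluded
		{Digest: "sha256:cccc", SourceRepository: "registry.example.com/other/app"}, // viable, 1 shared path component
		{Digest: "sha256:dddd", SourceRepository: "registry.example.com/team/base"}, // viable, 2 shared path components
	}
	// With max=1 only the best candidate remains: registry.example.com/team/base.
	return getRepositoryMountCandidates(target, nil, 1, known)
}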
// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The
// candidate "a" is preferred over "b":
//
//  1. if it was hashed using the same AuthConfig as the one used to authenticate to the target repository
//     and "b" was not
//  2. if it has more leading repository path components exactly matching the path components of the target
//     repository
type byLikeness struct {
	arr            []metadata.V2Metadata
	hmacKey        []byte
	pathComponents []string
}

func (bla byLikeness) Less(i, j int) bool {
	aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey)
	bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey)
	if aMacMatch != bMacMatch {
		return aMacMatch
	}
	aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents)
	bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents)
	return aMatch > bMatch
}
func (bla byLikeness) Swap(i, j int) {
	bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i]
}
func (bla byLikeness) Len() int { return len(bla.arr) }

func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) {
	// reverse the metadata array to shift the newest entries to the beginning
	for i := 0; i < len(marr)/2; i++ {
		marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i]
	}
	// keep equal entries ordered from the youngest to the oldest
	sort.Stable(byLikeness{
		arr:            marr,
		hmacKey:        hmacKey,
		pathComponents: getPathComponents(repoInfo.Name()),
	})
}

// numOfMatchingPathComponents returns the number of leading path components of "pth" that exactly match
// "matchComponents".
func numOfMatchingPathComponents(pth string, matchComponents []string) int {
	pthComponents := getPathComponents(pth)
	i := 0
	for ; i < len(pthComponents) && i < len(matchComponents); i++ {
		if matchComponents[i] != pthComponents[i] {
			return i
		}
	}
	return i
}

func getPathComponents(path string) []string {
	return strings.Split(path, "/")
}

func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) {
	if layerUpload != nil {
		logrus.Debugf("cancelling upload of blob %s", dgst)
		err := layerUpload.Cancel(ctx)
		if err != nil {
			logrus.Warnf("failed to cancel upload: %v", err)
		}
	}
}
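
// exampleLikenessOrdering is an illustrative sketch (not part of the upstream
// file) showing the order produced by sortV2MetadataByLikenessAndAge for a
// hypothetical target repository "docker.io/library/app". Entries sharing more
// leading path components with the target sort first; within equal likeness,
// the most recently added entries stay first. The digests are placeholders.
func exampleLikenessOrdering() []metadata.V2Metadata {
	target, _ := reference.ParseNormalizedNamed("docker.io/library/app")
	entries := []metadata.V2Metadata{
		{Digest: "sha256:1111", SourceRepository: "docker.io/other/app"},    // 1 matching component
		{Digest: "sha256:2222", SourceRepository: "docker.io/library/base"}, // 2 matching components
		{Digest: "sha256:3333", SourceRepository: "docker.io/library/app"},  // 3 matching components
	}
	sortV2MetadataByLikenessAndAge(target, nil, entries)
	// Resulting order: 3333, 2222, 1111.
	return entries
}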