github.com/fabiokung/docker@v0.11.2-0.20170222101415-4534dcd49497/distribution/pull_v2.go

package distribution

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"runtime"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/opencontainers/go-digest"
	"golang.org/x/net/context"
)

var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pullV2Repository(ctx, ref); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err) {
			logrus.Errorf("Error trying v2 registry: %v", err)
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}
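// NOTE: a minimal sketch of how a caller of Pull might distinguish a
// fallback-eligible failure from a hard failure. The puller variable is
// hypothetical; fallbackError and its fields are the ones used above.
//
//	if err := puller.Pull(ctx, ref); err != nil {
//		if fErr, ok := err.(fallbackError); ok {
//			// fErr.confirmedV2 == false means the registry never proved
//			// it speaks v2, so a retry against the v1 protocol is allowed.
//		}
//	}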
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)

	return nil
}

type v2LayerDescriptor struct {
	digest            digest.Digest
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.V2MetadataService.GetDiffID(ld.digest)
}

func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}
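// Download (above) resumes interrupted transfers: the partially written temp
// file is retained across attempts, the resume offset is recovered by seeking
// to its end, and layerDownload.Seek asks the transport for the remaining
// byte range. Roughly, the HTTP exchange this relies on looks like the
// following (headers illustrative, not taken from this file):
//
//	GET /v2/<name>/blobs/<digest>
//	Range: bytes=<offset>-
//
// If the server ignores the range and replies 200 instead of 206, the
// transport surfaces transport.ErrWrongCodeForByteRange, the temp file is
// truncated, and the download restarts from offset 0.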
func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}
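// The metadata written by Registered is what lets DiffID answer cheaply on
// later pulls: it maps this layer's uncompressed DiffID to the compressed
// blob digest as seen in a particular source repository. A minimal sketch of
// the read side (error handling elided):
//
//	diffID, err := ld.V2MetadataService.GetDiffID(ld.digest)
//	if err == nil {
//		// the layer content is already known locally; the download can be skipped
//	}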
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, fmt.Errorf("target is %s", configClass)
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
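// For orientation, the three manifest shapes pullV2Tag dispatches on above
// (types from the docker/distribution manifest packages):
//
//	*schema1.SignedManifest                - legacy schema 1, signed JSON, layers in FSLayers
//	*schema2.DeserializedManifest          - schema 2, config blob plus ordered layer list
//	*manifestlist.DeserializedManifestList - multi-platform list of per-platform manifests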
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}
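// A note on the "throwaway" flag probed in the loop above: each schema1
// history entry embeds a v1 image JSON document, and entries created by
// empty (metadata-only) commits are marked "throwaway": true, meaning there
// is no filesystem layer to download for them. An illustrative (not real)
// V1Compatibility payload:
//
//	{"id": "3b38edc92eb7...", "parent": "8d9a2b03f6c8...", "throwaway": true}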
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, manifestDigest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.Layers {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte        // raw serialized image config
		downloadedRootFS *image.RootFS // rootFS from registered layers
		configRootFS     *image.RootFS // rootFS from configuration
		release          func()        // release resources from rootFS download
	)

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download, slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err != nil {
			return "", "", err
		}

		if configRootFS == nil {
			return "", "", errRootFSInvalid
		}
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}

func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) {
	select {
	case configJSON := <-configChan:
		rootfs, err := s.RootFSFromConfig(configJSON)
		if err != nil {
			return nil, nil, err
		}
		return configJSON, rootfs, nil
	case err := <-errChan:
		return nil, nil, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2Config.
	}
}
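// pullSchema2 overlaps two transfers: the config blob (goroutine reporting on
// configChan/configErrChan) and the layer downloads (reporting on
// downloadsDone/layerErrChan). The DiffID comparison at the end is the
// security-relevant step; the invariant it enforces, with hypothetical
// digests:
//
//	config rootfs diff_ids:   [sha256:aaaa..., sha256:bbbb...]
//	downloaded layer DiffIDs: [sha256:aaaa..., sha256:bbbb...]
//
// Any length or positional mismatch yields errRootFSMismatch and the image
// is rejected rather than stored.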
// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var manifestDigest digest.Digest
	for _, manifestDescriptor := range mfstList.Manifests {
		// TODO(aaronl): The manifest list spec supports optional
		// "features" and "variant" fields. These are not yet used.
		// Once they are, their values should be interpreted here.
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
			manifestDigest = manifestDescriptor.Digest
			break
		}
	}

	if manifestDigest == "" {
		return "", "", errors.New("no supported platform found in manifest list")
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		id, _, err = p.pullSchema1(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		id, _, err = p.pullSchema2(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}
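// Platform selection in pullManifestList is an exact match against
// runtime.GOARCH and runtime.GOOS. For orientation, a single entry of
// mfstList.Manifests carries roughly this JSON shape (values illustrative):
//
//	{
//		"digest": "sha256:cccc...",
//		"platform": {"architecture": "amd64", "os": "linux"}
//	}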
func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fallback to v1
// (even if confirmedV2 has been set already), and if so, wraps the error in
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
// error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
	}
	return m, nil
}

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}
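// In fixManifestLayers, index 0 is the top-most (most recent) layer, so each
// entry's Parent is expected to equal the ID at index i+1. A hypothetical
// before/after for the consecutive-duplicate removal:
//
//	before: IDs [C, B, B, A]  (B repeated in adjacent history entries)
//	after:  IDs [C, B, A]     (FSLayers and History both trimmed at index 1)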
func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}