github.com/mforkel/docker-ce-i386@v17.12.1-ce-rc2+incompatible/components/engine/distribution/pull_v2.go

package distribution

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"runtime"
	"strings"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform string) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pullV2Repository(ctx, ref, platform); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err, p.endpoint.Mirror) {
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}
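
// A hedged sketch of how a caller might consume the error contract of
// Pull above. The endpoint-iteration loop and variable names here are
// assumptions for illustration, not code from this package:
//
//	for _, endpoint := range endpoints {
//		p := &v2Puller{endpoint: endpoint /* ...remaining fields... */}
//		if err := p.Pull(ctx, ref, runtime.GOOS); err != nil {
//			if fbErr, ok := err.(fallbackError); ok && !fbErr.confirmedV2 {
//				continue // registry not confirmed as v2; try the next endpoint
//			}
//			return err // hard failure, no fallback permitted
//		}
//		return nil
//	}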

func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform string) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref, platform)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef, platform)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)

	return nil
}

type v2LayerDescriptor struct {
	digest            digest.Digest
	diffID            layer.DiffID
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	if ld.diffID != "" {
		return ld.diffID, nil
	}
	return ld.V2MetadataService.GetDiffID(ld.digest)
}
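
// v2LayerDescriptor is consumed through the xfer.DownloadDescriptor
// interface (Key/ID/DiffID/Download/Close, plus the optional Registered
// hook). A compile-time assertion such as the following would document
// that relationship; it is a sketch, not part of the original file:
//
//	var _ xfer.DownloadDescriptor = (*v2LayerDescriptor)(nil)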

func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}

func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}
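
// The Registered callback above means a later pull of the same blob can
// resolve its DiffID from the metadata store without re-downloading. A
// minimal sketch (names assumed, error handling elided):
//
//	ld := &v2LayerDescriptor{digest: dgst, V2MetadataService: svc}
//	diffID, _ := ld.DiffID() // falls through to svc.GetDiffID(dgst) when ld.diffID is unset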

func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, os string) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass}
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, os)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, os)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, os)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
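
// For orientation, the two manifest lookups in pullV2Tag above correspond
// to pull-by-digest and pull-by-tag. A hedged sketch of constructing each
// reference shape with the reference package (dgst is assumed):
//
//	named, _ := reference.ParseNormalizedNamed("docker.io/library/busybox")
//	byTag, _ := reference.WithTag(named, "latest")   // served via distribution.WithTag
//	byDigest, _ := reference.WithDigest(named, dgst) // served via manSvc.Get(ctx, dgst)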

func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	// The v1 manifest itself doesn't directly contain a platform, but the
	// history entries do. Unfortunately each entry is a JSON string, so
	// search through the history until we find one that indicates the os.
	// supertest2014/nyan is an example of a registry image with schemav1.
	configOS := runtime.GOOS
	if system.LCOWSupported() {
		type config struct {
			Os string `json:"os,omitempty"`
		}
		for _, v := range verifiedManifest.History {
			var c config
			if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil {
				if c.Os != "" {
					configOS = c.Os
					break
				}
			}
		}
	}

	// Bail out early if the requested OS doesn't match that of the configuration.
	// This avoids doing the download, only to potentially fail later.
	if !strings.EqualFold(configOS, requestedOS) {
		return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.OS(configOS), descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}
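
// The "throwaway" flag parsed in pullSchema1 above lives inside each
// history entry's V1Compatibility JSON. An abbreviated, hypothetical entry:
//
//	{"id":"<64 hex chars>","os":"linux","throwaway":true}
//
// Throwaway entries contribute image history but no layer download, which
// is why the loop records the converted history entry before `continue`.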

func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, manifestDigest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.Layers {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte        // raw serialized image config
		downloadedRootFS *image.RootFS // rootFS from registered layers
		configRootFS     *image.RootFS // rootFS from configuration
		release          func()        // release resources from rootFS download
		configOS         layer.OS      // for LCOW when registering downloaded layers
	)

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download, slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configOS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err != nil {
			return "", "", err
		}

		if configRootFS == nil {
			return "", "", errRootFSInvalid
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch
		}

		// Bail out early if the requested OS doesn't match that of the configuration.
		// This avoids doing the download, only to potentially fail later.
		if !strings.EqualFold(string(configOS), requestedOS) {
			return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side loaded
		for i := range descriptors {
			descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layer.OS(requestedOS), descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}

func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, layer.OS, error) {
	select {
	case configJSON := <-configChan:
		rootfs, os, err := s.RootFSAndOSFromConfig(configJSON)
		if err != nil {
			return nil, nil, "", err
		}
		return configJSON, rootfs, os, nil
	case err := <-errChan:
		return nil, nil, "", err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2Config.
	}
}
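
// receiveConfig is one half of the channel protocol used by pullSchema2:
// the config goroutine sends exactly one value on configChan or
// configErrChan (both buffered with capacity 1), so a single select is
// sufficient and the sender never blocks. The producer side, for
// reference, is the goroutine in pullSchema2 above:
//
//	go func() {
//		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
//		if err != nil {
//			configErrChan <- ImageConfigPullError{Err: err}
//			cancel() // abort the layer downloads as well
//			return
//		}
//		configChan <- configJSON
//	}()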

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, os string) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), os, runtime.GOARCH)

	manifestMatches := filterManifests(mfstList.Manifests, os)

	if len(manifestMatches) == 0 {
		errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", os, runtime.GOARCH)
		logrus.Debug(errMsg)
		return "", "", errors.New(errMsg)
	}

	if len(manifestMatches) > 1 {
		logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
	}
	manifestDigest := manifestMatches[0].Digest

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		id, _, err = p.pullSchema1(ctx, manifestRef, v, os)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		id, _, err = p.pullSchema2(ctx, manifestRef, v, os)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}

func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}
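
// Both pullSchema2Config and schema2ManifestDigest above rely on the same
// go-digest verification idiom; a minimal standalone sketch:
//
//	verifier := dgst.Verifier()
//	if _, err := verifier.Write(payload); err != nil {
//		// handle write error
//	}
//	if !verifier.Verified() {
//		// payload does not hash to dgst; reject it
//	}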

// allowV1Fallback checks if the error is a possible reason to fall back to v1
// (even if confirmedV2 has been set already), and if so, wraps the error in
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
// error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
	}
	return m, nil
}
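
// The invariants enforced by verifySchema1Manifest above reflect the
// schema1 layout, where fsLayers and history are parallel arrays ordered
// from the top-most layer downward. An abbreviated, hypothetical manifest:
//
//	{
//	  "schemaVersion": 1,
//	  "fsLayers": [{"blobSum": "sha256:..."}, {"blobSum": "sha256:..."}],
//	  "history": [{"v1Compatibility": "{...}"}, {"v1Compatibility": "{...}"}]
//	}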

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}

func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}
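
// A hedged sketch of exercising fixManifestLayers with a synthetic
// manifest containing a repeated entry (IDs are fabricated; error
// handling elided):
//
//	id := strings.Repeat("a", 64) // v1.ValidateID requires a 64-hex-char ID
//	m := &schema1.Manifest{
//		FSLayers: []schema1.FSLayer{{}, {}}, // blobSums elided for brevity
//		History: []schema1.History{
//			{V1Compatibility: `{"id":"` + id + `"}`},
//			{V1Compatibility: `{"id":"` + id + `"}`},
//		},
//	}
//	err := fixManifestLayers(m) // err == nil; the duplicate is collapsed to one layer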