github.com/vikstrous/docker@v1.8.2/graph/pull_v2.go

package graph

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/trust"
	"github.com/docker/docker/utils"
	"github.com/docker/libtrust"
	"golang.org/x/net/context"
)

type v2Puller struct {
	*TagStore
	endpoint  registry.APIEndpoint
	config    *ImagePullConfig
	sf        *streamformatter.StreamFormatter
	repoInfo  *registry.RepositoryInfo
	repo      distribution.Repository
	sessionID string
}

func (p *v2Puller) Pull(tag string) (fallback bool, err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig)
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return true, err
	}

	p.sessionID = stringid.GenerateRandomID()

	if err := p.pullV2Repository(tag); err != nil {
		if registry.ContinueOnError(err) {
			logrus.Debugf("Error trying v2 registry: %v", err)
			return true, err
		}
		return false, err
	}
	return false, nil
}

func (p *v2Puller) pullV2Repository(tag string) (err error) {
	var tags []string
	taggedName := p.repoInfo.LocalName
	if len(tag) > 0 {
		tags = []string{tag}
		taggedName = utils.ImageReference(p.repoInfo.LocalName, tag)
	} else {
		var err error

		manSvc, err := p.repo.Manifests(context.Background())
		if err != nil {
			return err
		}

		tags, err = manSvc.Tags()
		if err != nil {
			return err
		}

	}

	c, err := p.poolAdd("pull", taggedName)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			p.config.OutStream.Write(p.sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", p.repoInfo.CanonicalName))
			<-c
			return nil
		}
		return err
	}
	defer p.poolRemove("pull", taggedName)

	var layersDownloaded bool
	for _, tag := range tags {
		// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
		// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
		pulledNew, err := p.pullV2Tag(tag, taggedName)
		if err != nil {
			return err
		}
		layersDownloaded = layersDownloaded || pulledNew
	}

	WriteStatus(taggedName, p.config.OutStream, p.sf, layersDownloaded)

	return nil
}

// downloadInfo is used to pass information from download to extractor
type downloadInfo struct {
	img     *image.Image
	tmpFile *os.File
	digest  digest.Digest
	layer   distribution.ReadSeekCloser
	size    int64
	err     chan error
	out     io.Writer // Download progress is written here.
}

type errVerification struct{}

func (errVerification) Error() string { return "verification failed" }

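// download fetches a single layer blob into a temporary file, streaming it
// through a digest verifier so the checksum can be validated once the copy
// completes. If another client is already pulling the same image ID, it
// simply waits for that pull to finish. Exactly one value is sent on di.err
// to signal completion; on a successful fresh download, di.tmpFile and
// di.layer are populated for the extraction step in pullV2Tag.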
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID)

	out := di.out

	if c, err := p.poolAdd("pull", "img:"+di.img.ID); err != nil {
		if c != nil {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Layer already being pulled by another client. Waiting.", nil))
			<-c
			out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
		} else {
			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", di.img.ID, err)
		}
		di.err <- nil
		return
	}

	defer p.poolRemove("pull", "img:"+di.img.ID)
	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		di.err <- err
		return
	}

	blobs := p.repo.Blobs(context.Background())

	desc, err := blobs.Stat(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       out,
		Formatter: p.sf,
		Size:      int(di.size),
		NewLines:  false,
		ID:        stringid.TruncateID(di.img.ID),
		Action:    "Downloading",
	})
	io.Copy(tmpFile, reader)

	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name())
	di.tmpFile = tmpFile
	di.layer = layerDownload

	di.err <- nil
}

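// pullV2Tag pulls a single tag (or digest reference) from the repository: it
// fetches and validates the signed manifest, downloads any missing layers
// concurrently, registers them in the graph, and finally records the tag or
// digest mapping. The boolean result reports whether anything changed
// locally, i.e. new layers were registered or the reference was newly
// created.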
func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
	out := p.config.OutStream

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	manifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	verified, err = p.validateManifest(manifest, tag)
	if err != nil {
		return false, err
	}
	if verified {
		logrus.Printf("Image manifest for %s has been verified", taggedName)
	}

	// By using a pipeWriter for each of the downloads to write their progress
	// to, we can avoid an issue where this function returns an error but
	// leaves behind running download goroutines. By splitting the writer
	// with a pipe, we can close the pipe if there is any error, consequently
	// causing each download to cancel due to an error writing to this pipe.
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		if _, err := io.Copy(out, pipeReader); err != nil {
			logrus.Errorf("error copying from layer download progress reader: %s", err)
			if err := pipeReader.CloseWithError(err); err != nil {
				logrus.Errorf("error closing the progress reader: %s", err)
			}
		}
	}()
	defer func() {
		if err != nil {
			// All operations on the pipe are synchronous. This call will wait
			// until all current readers/writers are done using the pipe then
			// set the error. All successive reads/writes will return with this
			// error.
			pipeWriter.CloseWithError(errors.New("download canceled"))
		} else {
			// If no error then just close the pipe.
			pipeWriter.Close()
		}
	}()

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	layerIDs := []string{}
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)
	}()

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility))
		if err != nil {
			logrus.Debugf("error getting image v1 json: %v", err)
			return false, err
		}
		downloads[i].img = img
		downloads[i].digest = manifest.FSLayers[i].BlobSum

		p.graph.Retain(p.sessionID, img.ID)
		layerIDs = append(layerIDs, img.ID)

		// Check if exists
		if p.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			continue
		}

		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		downloads[i].err = make(chan error)
		downloads[i].out = pipeWriter
		go p.download(&downloads[i])
	}

	var tagUpdated bool
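	// Wait for each download and register (extract) the layers into the graph.
	// This loop walks the manifest entries in the same reverse order used to
	// start the downloads, so parent layers are registered before the layers
	// built on top of them; entries for which no download was started already
	// exist locally and are only reported as such.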
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			if err := <-d.err; err != nil {
				return false, err
			}
		}
		if d.layer != nil {
			// if tmpFile is empty, assume the layer was downloaded and extracted elsewhere
			defer os.Remove(d.tmpFile.Name())
			defer d.tmpFile.Close()
			d.tmpFile.Seek(0, 0)
			if d.tmpFile != nil {

				reader := progressreader.New(progressreader.Config{
					In:        d.tmpFile,
					Out:       out,
					Formatter: p.sf,
					Size:      int(d.size),
					NewLines:  false,
					ID:        stringid.TruncateID(d.img.ID),
					Action:    "Extracting",
				})

				err = p.graph.Register(d.img, reader)
				if err != nil {
					return false, err
				}

				if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
		}
	}

	manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling to a user.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}

// verifyTrustedKeys checks the keys provided against the trust store,
// ensuring that the provided keys are trusted for the namespace. The keys
// provided to this method must come from the signatures provided as part of
// the manifest JWS package, obtained from unpackSignedManifest or libtrust.
func (p *v2Puller) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey) (verified bool, err error) {
	if namespace[0] != '/' {
		namespace = "/" + namespace
	}

	for _, key := range keys {
		b, err := key.MarshalJSON()
		if err != nil {
			return false, fmt.Errorf("error marshalling public key: %s", err)
		}
		// Check key has read/write permission (0x03)
		v, err := p.trustService.CheckKey(namespace, b, 0x03)
		if err != nil {
			vErr, ok := err.(trust.NotVerifiedError)
			if !ok {
				return false, fmt.Errorf("error running key check: %s", err)
			}
			logrus.Debugf("Key check result: %v", vErr)
		}
		verified = v
	}

	if verified {
		logrus.Debug("Key check result: verified")
	}

	return
}

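// validateManifest checks the integrity of a schema 1 signed manifest. If the
// tag is itself a digest reference, the manifest payload is first verified
// against that digest. The manifest is then checked for a supported schema
// version and a consistent, non-empty set of layers, its signatures are
// verified, and the signing keys are checked against the trust service; the
// returned boolean reports whether those keys are trusted for the
// repository's namespace.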
func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (verified bool, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if manifestDigest, err := digest.ParseDigest(tag); err == nil {
		verifier, err := digest.NewDigestVerifier(manifestDigest)
		if err != nil {
			return false, err
		}
		payload, err := m.Payload()
		if err != nil {
			return false, err
		}
		if _, err := verifier.Write(payload); err != nil {
			return false, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", manifestDigest)
			logrus.Error(err)
			return false, err
		}
	}

	// TODO(tiborvass): what's the use case for having manifest == nil and err == nil? Shouldn't the error be "DoesNotExist"?
	if m == nil {
		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
	}
	if m.SchemaVersion != 1 {
		return false, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag)
	}
	if len(m.FSLayers) != len(m.History) {
		return false, fmt.Errorf("length of history not equal to number of layers for tag %q", tag)
	}
	if len(m.FSLayers) == 0 {
		return false, fmt.Errorf("no FSLayers in manifest for tag %q", tag)
	}
	keys, err := manifest.Verify(m)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest for tag %q: %v", tag, err)
	}
	verified, err = p.verifyTrustedKeys(m.Name, keys)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest keys: %v", err)
	}
	return verified, nil
}