github.com/dougm/docker@v1.5.0/graph/push.go

package graph

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"strings"
	"sync"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/image"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
	"github.com/docker/libtrust"
)

var ErrV2RegistryUnavailable = errors.New("error v2 registry unavailable")

// getImageList retrieves all the images to be uploaded in the correct order.
func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
	var (
		imageList   []string
		imagesSeen  = make(map[string]bool)
		tagsByImage = make(map[string][]string)
	)

	for tag, id := range localRepo {
		if requestedTag != "" && requestedTag != tag {
			continue
		}
		var imageListForThisTag []string

		tagsByImage[id] = append(tagsByImage[id], tag)

		for img, err := s.graph.Get(id); img != nil; img, err = img.GetParent() {
			if err != nil {
				return nil, nil, err
			}

			if imagesSeen[img.ID] {
				// This image is already on the list, we can ignore it and all its parents
				break
			}

			imagesSeen[img.ID] = true
			imageListForThisTag = append(imageListForThisTag, img.ID)
		}

		// reverse the image list for this tag (so the "most"-parent image is first)
		for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
			imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
		}

		// append to main image list
		imageList = append(imageList, imageListForThisTag...)
	}
	if len(imageList) == 0 {
		return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
	}
	log.Debugf("Image list: %v", imageList)
	log.Debugf("Tags by image: %v", tagsByImage)

	return imageList, tagsByImage, nil
}

// getImageTags returns the tags to push for localName, limited to askedTag when it is non-empty.
func (s *TagStore) getImageTags(localName, askedTag string) ([]string, error) {
	localRepo, err := s.Get(localName)
	if err != nil {
		return nil, err
	}
	log.Debugf("Checking %s against %#v", askedTag, localRepo)
	if len(askedTag) > 0 {
		if _, ok := localRepo[askedTag]; !ok {
			return nil, fmt.Errorf("Tag does not exist for %s:%s", localName, askedTag)
		}
		return []string{askedTag}, nil
	}
	var tags []string
	for tag := range localRepo {
		tags = append(tags, tag)
	}
	return tags, nil
}

// createImageIndex returns an index of an image's layer IDs and tags.
func (s *TagStore) createImageIndex(images []string, tags map[string][]string) []*registry.ImgData {
	var imageIndex []*registry.ImgData
	for _, id := range images {
		if tags, hasTags := tags[id]; hasTags {
			// If an image has tags you must add an entry in the image index
			// for each tag
			for _, tag := range tags {
				imageIndex = append(imageIndex, &registry.ImgData{
					ID:  id,
					Tag: tag,
				})
			}
			continue
		}
		// If the image does not have a tag it still needs to be sent to the
		// registry with an empty tag so that it is associated with the repository
		imageIndex = append(imageIndex, &registry.ImgData{
			ID:  id,
			Tag: "",
		})
	}
	return imageIndex
}

type imagePushData struct {
	id       string
	endpoint string
	tokens   []string
}

// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
// and, if it is absent, sends the image ID on the channel of images to be pushed.
func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Writer, sf *utils.StreamFormatter,
	images chan imagePushData, imagesToPush chan string) {
	defer wg.Done()
	for image := range images {
		if err := r.LookupRemoteImage(image.id, image.endpoint, image.tokens); err != nil {
			log.Errorf("Error in LookupRemoteImage: %s", err)
			imagesToPush <- image.id
			continue
		}
		out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(image.id)))
	}
}

// pushImageToEndpoint pushes the listed images and their tags to a single registry
// endpoint, skipping images that already exist on that endpoint.
func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteName string, imageIDs []string,
	tags map[string][]string, repo *registry.RepositoryData, sf *utils.StreamFormatter, r *registry.Session) error {
	workerCount := len(imageIDs)
	// start a maximum of 5 workers to check if images exist on the specified endpoint.
	if workerCount > 5 {
		workerCount = 5
	}
	var (
		wg           = &sync.WaitGroup{}
		imageData    = make(chan imagePushData, workerCount*2)
		imagesToPush = make(chan string, workerCount*2)
		pushes       = make(chan map[string]struct{}, 1)
	)
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go lookupImageOnEndpoint(wg, r, out, sf, imageData, imagesToPush)
	}
	// start a goroutine that consumes the images to push
	go func() {
		shouldPush := make(map[string]struct{})
		for id := range imagesToPush {
			shouldPush[id] = struct{}{}
		}
		pushes <- shouldPush
	}()
	for _, id := range imageIDs {
		imageData <- imagePushData{
			id:       id,
			endpoint: endpoint,
			tokens:   repo.Tokens,
		}
	}
	// close the channel to notify the workers that there will be no more images to check.
	close(imageData)
	wg.Wait()
	close(imagesToPush)
	// wait for all the images that require pushes to be collected into a consumable map.
	shouldPush := <-pushes
	// finish by pushing any images and tags to the endpoint. The order in which the images
	// are pushed is very important, which is why we still iterate over the ordered list of imageIDs.
	for _, id := range imageIDs {
		if _, push := shouldPush[id]; push {
			if _, err := s.pushImage(r, out, id, endpoint, repo.Tokens, sf); err != nil {
				// FIXME: Continue on error?
				return err
			}
		}
		for _, tag := range tags[id] {
			out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag))
			if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil {
				return err
			}
		}
	}
	return nil
}

// pushRepository pushes layers that do not already exist on the registry.
func (s *TagStore) pushRepository(r *registry.Session, out io.Writer,
	repoInfo *registry.RepositoryInfo, localRepo map[string]string,
	tag string, sf *utils.StreamFormatter) error {
	log.Debugf("Local repo: %s", localRepo)
	out = utils.NewWriteFlusher(out)
	imgList, tags, err := s.getImageList(localRepo, tag)
	if err != nil {
		return err
	}
	out.Write(sf.FormatStatus("", "Sending image list"))

	imageIndex := s.createImageIndex(imgList, tags)
	log.Debugf("Preparing to push %s with the following images and tags", localRepo)
	for _, data := range imageIndex {
		log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
	}
	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err := r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, false, nil)
	if err != nil {
		return err
	}
	nTag := 1
	if tag == "" {
		nTag = len(localRepo)
	}
	out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", repoInfo.CanonicalName, nTag))
	// push the repository to each of the endpoints only if it does not exist.
	for _, endpoint := range repoData.Endpoints {
		if err := s.pushImageToEndpoint(endpoint, out, repoInfo.RemoteName, imgList, tags, repoData, sf, r); err != nil {
			return err
		}
	}
	_, err = r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, true, repoData.Endpoints)
	return err
}

// pushImage pushes a single image (JSON metadata, layer archive, and checksum) to a
// v1 registry endpoint and returns the layer checksum.
func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))

	imgData := &registry.ImgData{
		ID: imgID,
	}

	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := s.graph.TempLayerArchive(imgID, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)

	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}

	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}

// pushV2Repository signs and pushes a manifest, along with any missing layers, to a
// v2 registry for each requested tag of the repository.
func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter) error {
	if repoInfo.Official {
		j := eng.Job("trust_update_base")
		if err := j.Run(); err != nil {
			log.Errorf("error updating trust base graph: %s", err)
		}
	}

	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
	if err != nil {
		if repoInfo.Index.Official {
			log.Infof("Unable to push to V2 registry, falling back to v1: %s", err)
			return ErrV2RegistryUnavailable
		}
		return fmt.Errorf("error getting registry endpoint: %s", err)
	}

	tags, err := s.getImageTags(repoInfo.LocalName, tag)
	if err != nil {
		return err
	}
	if len(tags) == 0 {
		return fmt.Errorf("No tags to push for %s", repoInfo.LocalName)
	}

	auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
	if err != nil {
		return fmt.Errorf("error getting authorization: %s", err)
	}

	for _, tag := range tags {
		log.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
		mBytes, err := s.newManifest(repoInfo.LocalName, repoInfo.RemoteName, tag)
		if err != nil {
			return err
		}
		js, err := libtrust.NewJSONSignature(mBytes)
		if err != nil {
			return err
		}

		if err = js.Sign(s.trustKey); err != nil {
			return err
		}

		signedBody, err := js.PrettySignature("signatures")
		if err != nil {
			return err
		}
		log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())

		manifestBytes := string(signedBody)

		manifest, verified, err := s.loadManifest(eng, signedBody)
		if err != nil {
			return fmt.Errorf("error verifying manifest: %s", err)
		}

		if err := checkValidManifest(manifest); err != nil {
			return fmt.Errorf("invalid manifest: %s", err)
		}

		if verified {
			log.Infof("Pushing verified image, key %s is registered for %q", s.trustKey.KeyID(), repoInfo.RemoteName)
		}

		for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
			var (
				sumStr  = manifest.FSLayers[i].BlobSum
				imgJSON = []byte(manifest.History[i].V1Compatibility)
			)

			sumParts := strings.SplitN(sumStr, ":", 2)
			if len(sumParts) < 2 {
				return fmt.Errorf("Invalid checksum: %s", sumStr)
			}
			manifestSum := sumParts[1]

			img, err := image.NewImgJSON(imgJSON)
			if err != nil {
				return fmt.Errorf("Failed to parse json: %s", err)
			}

			// Call mount blob
			exists, err := r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, auth)
			if err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil))
				return err
			}

			if !exists {
				if err := s.pushV2Image(r, img, endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, sf, out, auth); err != nil {
					return err
				}
			} else {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image already exists", nil))
			}
		}

		// push the manifest
		if err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, bytes.NewReader([]byte(manifestBytes)), auth); err != nil {
			return err
		}
	}
	return nil
}

// pushV2Image pushes the image content to the v2 registry, first buffering the contents to disk
func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName, sumType, sumStr string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) error {
	out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Buffering to Disk", nil))

	image, err := s.graph.Get(img.ID)
	if err != nil {
		return err
	}
	arch, err := image.TarLayer()
	if err != nil {
		return err
	}
	tf, err := s.graph.newTempFile()
	if err != nil {
		return err
	}
	defer func() {
		tf.Close()
		os.Remove(tf.Name())
	}()

	size, err := bufferToFile(tf, arch)
	if err != nil {
		return err
	}

	// Send the layer
	log.Debugf("rendered layer for %s of [%d] size", img.ID, size)

	if err := r.PutV2ImageBlob(endpoint, imageName, sumType, sumStr, utils.ProgressReader(tf, int(size), out, sf, false, utils.TruncateID(img.ID), "Pushing"), auth); err != nil {
		out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil))
		return err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image successfully pushed", nil))
	return nil
}
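// A minimal sketch of how this push job is typically driven through the engine
// (illustrative only; it assumes the standard engine job wiring in this release,
// and the calling code shown here is not part of this file -- the environment
// keys simply mirror the ones read by CmdPush below):
//
//	job := eng.Job("push", "library/busybox")
//	job.Setenv("tag", "latest")
//	job.SetenvBool("json", true)
//	job.SetenvJson("authConfig", &registry.AuthConfig{})
//	job.Stdout.Add(os.Stdout)
//	if err := job.Run(); err != nil {
//		log.Errorf("push failed: %s", err)
//	}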
// FIXME: Allow to interrupt current push when new push of same image is done.

// CmdPush handles the "push" job: it pushes the named repository (or a single image)
// to its registry, preferring the v2 protocol and falling back to v1 when the v2
// registry is unavailable.
func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return job.Error(err)
	}

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
		return job.Error(err)
	}
	defer s.poolRemove("push", repoInfo.LocalName)

	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return job.Error(err)
	}

	img, err := s.graph.Get(repoInfo.LocalName)
	r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if endpoint.Version == registry.APIVersion2 {
		err := s.pushV2Repository(r, job.Eng, job.Stdout, repoInfo, tag, sf)
		if err == nil {
			return engine.StatusOK
		}

		if err != ErrV2RegistryUnavailable {
			return job.Errorf("Error pushing to registry: %s", err)
		}
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(s.Repositories[repoInfo.LocalName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := s.Repositories[repoInfo.LocalName]; exists {
			if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", repoInfo.CanonicalName))
	if _, err := s.pushImage(r, job.Stdout, img.ID, endpoint.String(), token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}