// Source: github.com/mheon/docker@v0.11.2-0.20150922122814-44f47903a831/graph/pull_v1.go
package graph

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/url"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
)

// v1Puller pulls a repository from a legacy v1 registry endpoint. It embeds
// *TagStore (providing p.graph, p.Tag, p.poolAdd/poolRemove, etc.) and holds
// the per-pull configuration plus the registry session that is established
// lazily in Pull.
type v1Puller struct {
	*TagStore
	endpoint registry.APIEndpoint
	config   *ImagePullConfig
	sf       *streamformatter.StreamFormatter
	repoInfo *registry.RepositoryInfo
	// session is set by Pull before pullRepository runs; nil until then.
	session *registry.Session
}

// Pull pulls the given tag from the v1 endpoint. The returned fallback flag
// tells the caller whether it may retry the pull against another endpoint
// (true for v1-unsupported features and session/endpoint setup failures;
// false once the actual repository pull has started).
func (p *v1Puller) Pull(tag string) (fallback bool, err error) {
	if utils.DigestReference(tag) {
		// v1 registries cannot serve content by digest.
		// Allowing fallback, because HTTPS v1 is before HTTP v2
		return true, registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}
	}

	tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name)
	if err != nil {
		return false, err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was ReceiveTimeout
		registry.NewTransport(tlsConfig),
		registry.DockerHeaders(p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
	if err != nil {
		logrus.Debugf("Could not get v1 endpoint: %v", err)
		return true, err
	}
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		logrus.Debugf("Fallback from error: %s", err)
		return true, err
	}
	if err := p.pullRepository(tag); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return false, err
	}
	return false, nil
}

// pullRepository fetches the repository data and tag list, downloads every
// matching image concurrently (one goroutine per entry in ImgList), and
// finally applies the local tags. An empty askedTag means "pull all tags".
// Images retained during download are released by the deferred Release once
// tagging is complete (or on error).
func (p *v1Puller) pullRepository(askedTag string) error {
	out := p.config.OutStream
	out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName))

	repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName)
	if err != nil {
		// NOTE(review): 404 detection by substring match on the error text is
		// fragile, but it is what the v1 session error format provides here.
		if strings.Contains(err.Error(), "HTTP code: 404") {
			return fmt.Errorf("Error: image %s not found", utils.ImageReference(p.repoInfo.RemoteName, askedTag))
		}
		// Unexpected HTTP error
		return err
	}

	logrus.Debugf("Retrieving the tag list")
	tagsList := make(map[string]string) // tag name -> image ID
	if askedTag == "" {
		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName)
	} else {
		var tagID string
		tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, askedTag)
		tagsList[askedTag] = tagID
	}
	if err != nil {
		if err == registry.ErrRepoNotFound && askedTag != "" {
			return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
		}
		logrus.Errorf("unable to get remote tags: %s", err)
		return err
	}

	// Rebuild ImgList from the authoritative tag list so each image carries
	// its tag (overwrites whatever GetRepositoryData returned for these IDs).
	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}

	logrus.Debugf("Registering tags")
	// If no tag has been specified, pull them all
	if askedTag == "" {
		for tag, id := range tagsList {
			repoData.ImgList[id].Tag = tag
		}
	} else {
		// Otherwise, check that the tag exists and use only that one
		id, exists := tagsList[askedTag]
		if !exists {
			return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
		}
		repoData.ImgList[id].Tag = askedTag
	}

	// NOTE(review): this local shadows the imported "errors" package for the
	// rest of the function; the package is only used in Pull, so this is safe
	// here, but renaming would be clearer.
	errors := make(chan error)

	layersDownloaded := false
	imgIDs := []string{}
	sessionID := p.session.ID()
	// Release every image retained below once tagging is done (or on error).
	defer func() {
		p.graph.Release(sessionID, imgIDs...)
	}()
	for _, image := range repoData.ImgList {
		// downloadImage runs in its own goroutine per image and always sends
		// exactly one value (nil or an error) on the errors channel, which the
		// collection loop below relies on for counting.
		downloadImage := func(img *registry.ImgData) {
			if askedTag != "" && img.Tag != askedTag {
				errors <- nil
				return
			}

			if img.Tag == "" {
				logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
				errors <- nil
				return
			}

			// ensure no two downloads of the same image happen at the same time
			poolKey := "img:" + img.ID
			broadcaster, found := p.poolAdd("pull", poolKey)
			broadcaster.Add(out)
			if found {
				// Another goroutine is already pulling this image; just wait
				// for its result.
				errors <- broadcaster.Wait()
				return
			}
			defer p.poolRemove("pull", poolKey)

			// we need to retain it until tagging
			// NOTE(review): imgIDs and layersDownloaded are appended/assigned
			// from multiple goroutines without synchronization here — looks
			// racy; confirm against upstream history before relying on it.
			p.graph.Retain(sessionID, img.ID)
			imgIDs = append(imgIDs, img.ID)

			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil))
			success := false
			var lastErr, err error
			var isDownloaded bool
			// Try configured mirrors first; fall back to the repository's own
			// endpoints only if every mirror fails.
			for _, ep := range p.repoInfo.Index.Mirrors {
				ep += "v1/"
				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
				if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep); err != nil {
					// Don't report errors when pulling from mirrors.
					logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err)
					continue
				}
				layersDownloaded = layersDownloaded || isDownloaded
				success = true
				break
			}
			if !success {
				for _, ep := range repoData.Endpoints {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
					if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep); err != nil {
						// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
						// As the error is also given to the output stream the user will see the error.
						lastErr = err
						broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil))
						continue
					}
					layersDownloaded = layersDownloaded || isDownloaded
					success = true
					break
				}
			}
			if !success {
				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr)
				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
				errors <- err
				broadcaster.CloseWithError(err)
				return
			}
			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))

			errors <- nil
		}

		go downloadImage(image)
	}

	// Collect exactly one result per ImgList entry; the last error (if any)
	// wins, matching the "only the last error is returned" note above.
	var lastError error
	for i := 0; i < len(repoData.ImgList); i++ {
		if err := <-errors; err != nil {
			lastError = err
		}
	}
	if lastError != nil {
		return lastError
	}

	// All downloads succeeded; apply local tags (all tags, or just askedTag).
	for tag, id := range tagsList {
		if askedTag != "" && tag != askedTag {
			continue
		}
		if err := p.Tag(p.repoInfo.LocalName, tag, id, true); err != nil {
			return err
		}
	}

	requestedTag := p.repoInfo.LocalName
	if len(askedTag) > 0 {
		requestedTag = utils.ImageReference(p.repoInfo.LocalName, askedTag)
	}
	writeStatus(requestedTag, out, p.sf, layersDownloaded)
	return nil
}

// pullImage downloads imgID and all of its ancestor layers (remote history)
// from the given endpoint, registering each layer in the graph. It returns
// whether any layer data was actually downloaded (false when everything was
// already present locally) and the first fatal error encountered. Transient
// fetch failures are retried up to 5 times with linear backoff.
func (p *v1Puller) pullImage(out io.Writer, imgID, endpoint string) (layersDownloaded bool, err error) {
	var history []string
	history, err = p.session.GetRemoteHistory(imgID, endpoint)
	if err != nil {
		return false, err
	}
	out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	sessionID := p.session.ID()
	// As imgID has been retained in pullRepository, no need to retain again
	p.graph.Retain(sessionID, history[1:]...)
	defer p.graph.Release(sessionID, history[1:]...)

	layersDownloaded = false
	// Walk history from the base layer (last element) up to imgID itself.
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		poolKey := "layer:" + id
		broadcaster, found := p.poolAdd("pull", poolKey)
		broadcaster.Add(out)
		if found {
			logrus.Debugf("Image (id: %s) pull is already running, skipping", id)
			err = broadcaster.Wait()
			if err != nil {
				return layersDownloaded, err
			}
			continue
		}

		// This must use a closure so it captures the value of err when
		// the function returns, not when the 'defer' is evaluated.
		// NOTE(review): deliberately deferred inside the loop — each layer's
		// pool entry is removed only when the whole function returns, with
		// the final value of the named return err.
		defer func() {
			p.poolRemoveWithError("pull", poolKey, err)
		}()

		if !p.graph.Exists(id) {
			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
			// This err intentionally shadows the named return inside the
			// retry loops; failures propagate via the explicit returns below.
			var (
				imgJSON []byte
				imgSize int64
				err     error
				img     *image.Image
			)
			retries := 5
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
				if err != nil && j == retries {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				} else if err != nil {
					// Linear backoff: 500ms, 1s, 1.5s, ...
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				layersDownloaded = true
				if err != nil && j == retries {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}

			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize)
				// Unwrap url.Error so the net.Error timeout check below can
				// see the underlying transport error.
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				}
				layersDownloaded = true
				// NOTE(review): deferred in a retry loop — a layer stream from
				// a timed-out attempt stays open until the function returns.
				defer layer.Close()

				err = p.graph.Register(img,
					progressreader.New(progressreader.Config{
						In:        layer,
						Out:       broadcaster,
						Formatter: p.sf,
						Size:      imgSize,
						NewLines:  false,
						ID:        stringid.TruncateID(id),
						Action:    "Downloading",
					}))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
					return layersDownloaded, err
				} else {
					break
				}
			}
		}
		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
		broadcaster.Close()
	}
	return layersDownloaded, nil
}