github.com/crquan/docker@v1.8.1/graph/pull_v1.go

package graph

import (
	"errors"
	"fmt"
	"net"
	"net/url"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
)

type v1Puller struct {
	*TagStore
	endpoint registry.APIEndpoint
	config   *ImagePullConfig
	sf       *streamformatter.StreamFormatter
	repoInfo *registry.RepositoryInfo
	session  *registry.Session
}

func (p *v1Puller) Pull(tag string) (fallback bool, err error) {
	if utils.DigestReference(tag) {
		// Allowing fallback, because HTTPS v1 is before HTTP v2
		return true, registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}
	}

	tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name)
	if err != nil {
		return false, err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was ReceiveTimeout
		registry.NewTransport(tlsConfig),
		registry.DockerHeaders(p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
	if err != nil {
		logrus.Debugf("Could not get v1 endpoint: %v", err)
		return true, err
	}
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		logrus.Debugf("Fallback from error: %s", err)
		return true, err
	}
	if err := p.pullRepository(tag); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return false, err
	}
	return false, nil
}

func (p *v1Puller) pullRepository(askedTag string) error {
	out := p.config.OutStream
	out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName))

	repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName)
	if err != nil {
		if strings.Contains(err.Error(), "HTTP code: 404") {
			return fmt.Errorf("Error: image %s not found", utils.ImageReference(p.repoInfo.RemoteName, askedTag))
		}
		// Unexpected HTTP error
		return err
	}

	logrus.Debugf("Retrieving the tag list")
	tagsList := make(map[string]string)
	if askedTag == "" {
		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName)
	} else {
		var tagId string
		tagId, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, askedTag)
		tagsList[askedTag] = tagId
	}
	if err != nil {
		if err == registry.ErrRepoNotFound && askedTag != "" {
			return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
		}
		logrus.Errorf("unable to get remote tags: %s", err)
		return err
	}

	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}

	logrus.Debugf("Registering tags")
	// If no tag has been specified, pull them all
	if askedTag == "" {
		for tag, id := range tagsList {
			repoData.ImgList[id].Tag = tag
		}
	} else {
		// Otherwise, check that the tag exists and use only that one
		id, exists := tagsList[askedTag]
		if !exists {
			return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
		}
		repoData.ImgList[id].Tag = askedTag
	}

	errors := make(chan error)

	layersDownloaded := false
	imgIDs := []string{}
	sessionID := p.session.ID()
	defer func() {
		p.graph.Release(sessionID, imgIDs...)
	}()
	for _, image := range repoData.ImgList {
		downloadImage := func(img *registry.ImgData) {
			if askedTag != "" && img.Tag != askedTag {
				errors <- nil
				return
			}

			if img.Tag == "" {
				logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
				errors <- nil
				return
			}

			// ensure no two downloads of the same image happen at the same time
			if c, err := p.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
				} else {
					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
				errors <- nil
				return
			}
			defer p.poolRemove("pull", "img:"+img.ID)

			// we need to retain it until tagging
			p.graph.Retain(sessionID, img.ID)
			imgIDs = append(imgIDs, img.ID)

			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil))
			success := false
			var lastErr, err error
			var isDownloaded bool
			for _, ep := range p.repoInfo.Index.Mirrors {
				ep += "v1/"
				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
				if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil {
					// Don't report errors when pulling from mirrors.
					logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err)
					continue
				}
				layersDownloaded = layersDownloaded || isDownloaded
				success = true
				break
			}
			if !success {
				for _, ep := range repoData.Endpoints {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
					if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil {
						// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
						// As the error is also given to the output stream the user will see the error.
						lastErr = err
						out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil))
						continue
					}
					layersDownloaded = layersDownloaded || isDownloaded
					success = true
					break
				}
			}
			if !success {
				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr)
				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
				errors <- err
				return
			}
			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))

			errors <- nil
		}

		go downloadImage(image)
	}

	var lastError error
	for i := 0; i < len(repoData.ImgList); i++ {
		if err := <-errors; err != nil {
			lastError = err
		}
	}
	if lastError != nil {
		return lastError
	}

	for tag, id := range tagsList {
		if askedTag != "" && tag != askedTag {
			continue
		}
		if err := p.Tag(p.repoInfo.LocalName, tag, id, true); err != nil {
			return err
		}
	}

	requestedTag := p.repoInfo.LocalName
	if len(askedTag) > 0 {
		requestedTag = utils.ImageReference(p.repoInfo.LocalName, askedTag)
	}
	WriteStatus(requestedTag, out, p.sf, layersDownloaded)
	return nil
}

func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, error) {
	history, err := p.session.GetRemoteHistory(imgID, endpoint)
	if err != nil {
		return false, err
	}
	out := p.config.OutStream
	out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	sessionID := p.session.ID()
	// As imgID has been retained in pullRepository, no need to retain again
	p.graph.Retain(sessionID, history[1:]...)
	defer p.graph.Release(sessionID, history[1:]...)
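
	// Walk the ancestry from the base layer (last history entry) up to the
	// requested image: for every ancestor not already present in the graph,
	// fetch the image JSON and then the fs layer, retrying each network call
	// up to five times with a short, growing delay before giving up.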
	layersDownloaded := false
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		if c, err := p.poolAdd("pull", "layer:"+id); err != nil {
			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		defer p.poolRemove("pull", "layer:"+id)

		if !p.graph.Exists(id) {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int
				err     error
				img     *image.Image
			)
			retries := 5
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
				if err != nil && j == retries {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				layersDownloaded = true
				if err != nil && j == retries {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}

			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				}
				layersDownloaded = true
				defer layer.Close()

				err = p.graph.Register(img,
					progressreader.New(progressreader.Config{
						In:        layer,
						Out:       out,
						Formatter: p.sf,
						Size:      imgSize,
						NewLines:  false,
						ID:        stringid.TruncateID(id),
						Action:    "Downloading",
					}))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
					return layersDownloaded, err
				} else {
					break
				}
			}
		}
		out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
	}
	return layersDownloaded, nil
}
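
// pullWithFallback is a hypothetical usage sketch, not part of the original
// file: it illustrates how a caller might interpret the fallback flag
// returned by Pull, where true means this v1 endpoint could not serve the
// request (for example, a pull by digest) and the next endpoint or protocol
// should be tried instead of failing outright.
func pullWithFallback(p *v1Puller, tag string) (bool, error) {
	fallback, err := p.Pull(tag)
	if err != nil && fallback {
		// Hypothetical handling: log and signal the caller to move on to the
		// next registry endpoint or protocol.
		logrus.Debugf("v1 pull of %q failed, trying next endpoint: %v", tag, err)
		return true, err
	}
	return false, err
}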