github.com/a4a881d4/docker@v1.9.0-rc2/graph/pull_v1.go (about)

     1  package graph
     2  
import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
)
    21  
// v1Puller performs pulls against a legacy (v1 protocol) registry.
// It embeds *TagStore so pulled images can be tagged into the local store.
type v1Puller struct {
	*TagStore
	endpoint registry.APIEndpoint           // registry endpoint this puller talks to
	config   *ImagePullConfig               // pull options: auth, headers, output stream
	sf       *streamformatter.StreamFormatter // formats status/progress messages for the client
	repoInfo *registry.RepositoryInfo       // resolved names (remote, local, canonical) for the repo
	session  *registry.Session              // v1 registry session; set lazily in Pull
}
    30  
    31  func (p *v1Puller) Pull(tag string) (fallback bool, err error) {
    32  	if utils.DigestReference(tag) {
    33  		// Allowing fallback, because HTTPS v1 is before HTTP v2
    34  		return true, registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}
    35  	}
    36  
    37  	tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name)
    38  	if err != nil {
    39  		return false, err
    40  	}
    41  	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
    42  	tr := transport.NewTransport(
    43  		// TODO(tiborvass): was ReceiveTimeout
    44  		registry.NewTransport(tlsConfig),
    45  		registry.DockerHeaders(p.config.MetaHeaders)...,
    46  	)
    47  	client := registry.HTTPClient(tr)
    48  	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
    49  	if err != nil {
    50  		logrus.Debugf("Could not get v1 endpoint: %v", err)
    51  		return true, err
    52  	}
    53  	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
    54  	if err != nil {
    55  		// TODO(dmcgowan): Check if should fallback
    56  		logrus.Debugf("Fallback from error: %s", err)
    57  		return true, err
    58  	}
    59  	if err := p.pullRepository(tag); err != nil {
    60  		// TODO(dmcgowan): Check if should fallback
    61  		return false, err
    62  	}
    63  	out := p.config.OutStream
    64  	out.Write(p.sf.FormatStatus("", "%s: this image was pulled from a legacy registry.  Important: This registry version will not be supported in future versions of docker.", p.repoInfo.CanonicalName))
    65  
    66  	return false, nil
    67  }
    68  
    69  func (p *v1Puller) pullRepository(askedTag string) error {
    70  	out := p.config.OutStream
    71  	out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName))
    72  
    73  	repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName)
    74  	if err != nil {
    75  		if strings.Contains(err.Error(), "HTTP code: 404") {
    76  			return fmt.Errorf("Error: image %s not found", utils.ImageReference(p.repoInfo.RemoteName, askedTag))
    77  		}
    78  		// Unexpected HTTP error
    79  		return err
    80  	}
    81  
    82  	logrus.Debugf("Retrieving the tag list")
    83  	tagsList := make(map[string]string)
    84  	if askedTag == "" {
    85  		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName)
    86  	} else {
    87  		var tagID string
    88  		tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, askedTag)
    89  		tagsList[askedTag] = tagID
    90  	}
    91  	if err != nil {
    92  		if err == registry.ErrRepoNotFound && askedTag != "" {
    93  			return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
    94  		}
    95  		logrus.Errorf("unable to get remote tags: %s", err)
    96  		return err
    97  	}
    98  
    99  	for tag, id := range tagsList {
   100  		repoData.ImgList[id] = &registry.ImgData{
   101  			ID:       id,
   102  			Tag:      tag,
   103  			Checksum: "",
   104  		}
   105  	}
   106  
   107  	logrus.Debugf("Registering tags")
   108  	// If no tag has been specified, pull them all
   109  	if askedTag == "" {
   110  		for tag, id := range tagsList {
   111  			repoData.ImgList[id].Tag = tag
   112  		}
   113  	} else {
   114  		// Otherwise, check that the tag exists and use only that one
   115  		id, exists := tagsList[askedTag]
   116  		if !exists {
   117  			return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
   118  		}
   119  		repoData.ImgList[id].Tag = askedTag
   120  	}
   121  
   122  	errors := make(chan error)
   123  
   124  	layersDownloaded := false
   125  	imgIDs := []string{}
   126  	sessionID := p.session.ID()
   127  	defer func() {
   128  		p.graph.Release(sessionID, imgIDs...)
   129  	}()
   130  	for _, imgData := range repoData.ImgList {
   131  		downloadImage := func(img *registry.ImgData) {
   132  			if askedTag != "" && img.Tag != askedTag {
   133  				errors <- nil
   134  				return
   135  			}
   136  
   137  			if img.Tag == "" {
   138  				logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
   139  				errors <- nil
   140  				return
   141  			}
   142  
   143  			if err := image.ValidateID(img.ID); err != nil {
   144  				errors <- err
   145  				return
   146  			}
   147  
   148  			// ensure no two downloads of the same image happen at the same time
   149  			poolKey := "img:" + img.ID
   150  			broadcaster, found := p.poolAdd("pull", poolKey)
   151  			broadcaster.Add(out)
   152  			if found {
   153  				errors <- broadcaster.Wait()
   154  				return
   155  			}
   156  			defer p.poolRemove("pull", poolKey)
   157  
   158  			// we need to retain it until tagging
   159  			p.graph.Retain(sessionID, img.ID)
   160  			imgIDs = append(imgIDs, img.ID)
   161  
   162  			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil))
   163  			success := false
   164  			var lastErr, err error
   165  			var isDownloaded bool
   166  			for _, ep := range p.repoInfo.Index.Mirrors {
   167  				ep += "v1/"
   168  				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
   169  				if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep); err != nil {
   170  					// Don't report errors when pulling from mirrors.
   171  					logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err)
   172  					continue
   173  				}
   174  				layersDownloaded = layersDownloaded || isDownloaded
   175  				success = true
   176  				break
   177  			}
   178  			if !success {
   179  				for _, ep := range repoData.Endpoints {
   180  					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
   181  					if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep); err != nil {
   182  						// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
   183  						// As the error is also given to the output stream the user will see the error.
   184  						lastErr = err
   185  						broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil))
   186  						continue
   187  					}
   188  					layersDownloaded = layersDownloaded || isDownloaded
   189  					success = true
   190  					break
   191  				}
   192  			}
   193  			if !success {
   194  				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr)
   195  				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
   196  				errors <- err
   197  				broadcaster.CloseWithError(err)
   198  				return
   199  			}
   200  			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
   201  
   202  			errors <- nil
   203  		}
   204  
   205  		go downloadImage(imgData)
   206  	}
   207  
   208  	var lastError error
   209  	for i := 0; i < len(repoData.ImgList); i++ {
   210  		if err := <-errors; err != nil {
   211  			lastError = err
   212  		}
   213  	}
   214  	if lastError != nil {
   215  		return lastError
   216  	}
   217  
   218  	for tag, id := range tagsList {
   219  		if askedTag != "" && tag != askedTag {
   220  			continue
   221  		}
   222  		if err := p.Tag(p.repoInfo.LocalName, tag, id, true); err != nil {
   223  			return err
   224  		}
   225  	}
   226  
   227  	requestedTag := p.repoInfo.LocalName
   228  	if len(askedTag) > 0 {
   229  		requestedTag = utils.ImageReference(p.repoInfo.LocalName, askedTag)
   230  	}
   231  	writeStatus(requestedTag, out, p.sf, layersDownloaded)
   232  	return nil
   233  }
   234  
// pullImage downloads image imgID and all of its ancestor layers from the
// given v1 registry endpoint, writing progress to out. layersDownloaded
// reports whether any layer content was actually fetched (false when
// everything was already present locally); err is the first fatal error.
// Note: err is a named return on purpose — the deferred poolRemoveWithError
// calls below read its final value.
func (p *v1Puller) pullImage(out io.Writer, imgID, endpoint string) (layersDownloaded bool, err error) {
	var history []string
	// history lists imgID followed by its ancestor image IDs.
	history, err = p.session.GetRemoteHistory(imgID, endpoint)
	if err != nil {
		return false, err
	}
	out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	sessionID := p.session.ID()
	// As imgID has been retained in pullRepository, no need to retain again
	p.graph.Retain(sessionID, history[1:]...)
	defer p.graph.Release(sessionID, history[1:]...)

	layersDownloaded = false
	// Walk the history oldest-first so each layer's parent exists before it
	// is registered.
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		poolKey := "layer:" + id
		broadcaster, found := p.poolAdd("pull", poolKey)
		broadcaster.Add(out)
		if found {
			// Someone else is pulling this layer; wait for them instead.
			logrus.Debugf("Image (id: %s) pull is already running, skipping", id)
			err = broadcaster.Wait()
			if err != nil {
				return layersDownloaded, err
			}
			continue
		}

		// This must use a closure so it captures the value of err when
		// the function returns, not when the 'defer' is evaluated.
		// NOTE(review): being inside the loop, these defers all run at
		// function return, one per layer this call claimed in the pool.
		defer func() {
			p.poolRemoveWithError("pull", poolKey, err)
		}()

		if !p.graph.Exists(id) {
			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
			// This err deliberately shadows the named return within the
			// retry loops below.
			var (
				imgJSON []byte
				imgSize int64
				err     error
				img     *image.Image
			)
			retries := 5
			// Retry fetching/parsing the image JSON with linear backoff
			// (j * 500ms); give up after the last attempt.
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
				if err != nil && j == retries {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				layersDownloaded = true
				if err != nil && j == retries {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}

			// Fetch and register the layer content, retrying only on
			// network timeouts with the same linear backoff.
			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize)
				// Unwrap url.Error so the timeout check below can see the
				// underlying net.Error.
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				}
				layersDownloaded = true
				// Deferred in a loop: the stream stays open until this
				// function returns.
				defer layer.Close()

				// Register streams the layer into the graph while reporting
				// download progress to the broadcaster.
				err = p.graph.Register(v1Descriptor{img},
					progressreader.New(progressreader.Config{
						In:        layer,
						Out:       broadcaster,
						Formatter: p.sf,
						Size:      imgSize,
						NewLines:  false,
						ID:        stringid.TruncateID(id),
						Action:    "Downloading",
					}))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
					return layersDownloaded, err
				} else {
					break
				}
			}
		}
		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
		broadcaster.Close()
	}
	return layersDownloaded, nil
}