github.com/mheon/docker@v0.11.2-0.20150922122814-44f47903a831/graph/push_v2.go

package graph

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/utils"
	"golang.org/x/net/context"
)
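
// v2Pusher pushes the tags of a local repository to a registry endpoint
// using the v2 protocol.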
type v2Pusher struct {
	*TagStore
	endpoint  registry.APIEndpoint
	localRepo Repository
	repoInfo  *registry.RepositoryInfo
	config    *ImagePushConfig
	sf        *streamformatter.StreamFormatter
	repo      distribution.Repository

	// layersPushed is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers.
	layersPushed map[digest.Digest]bool
}
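
// Push establishes a v2 session with the registry and pushes the requested
// tag(s). It returns fallback=true when the v2 repository could not be set
// up, signaling the caller that it may retry the push over another protocol.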
func (p *v2Pusher) Push() (fallback bool, err error) {
	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return true, err
	}
	return false, p.pushV2Repository(p.config.Tag)
}
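
// getImageTags returns the tags to push. If askedTag is non-empty, it must be
// a non-digest tag present in the local repository; otherwise every non-digest
// tag of the local repository is returned.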
func (p *v2Pusher) getImageTags(askedTag string) ([]string, error) {
	logrus.Debugf("Checking %q against %#v", askedTag, p.localRepo)
	if len(askedTag) > 0 {
		if _, ok := p.localRepo[askedTag]; !ok || utils.DigestReference(askedTag) {
			return nil, fmt.Errorf("Tag does not exist for %s", askedTag)
		}
		return []string{askedTag}, nil
	}
	var tags []string
	for tag := range p.localRepo {
		if !utils.DigestReference(tag) {
			tags = append(tags, tag)
		}
	}
	return tags, nil
}
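
// pushV2Repository pushes the requested tag, or every local tag when tag is
// empty. Only one push or pull of a given repository may run at a time.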
func (p *v2Pusher) pushV2Repository(tag string) error {
	localName := p.repoInfo.LocalName
	if _, found := p.poolAdd("push", localName); found {
		return fmt.Errorf("push or pull %s is already in progress", localName)
	}
	defer p.poolRemove("push", localName)

	tags, err := p.getImageTags(tag)
	if err != nil {
		return fmt.Errorf("error getting tags for %s: %s", localName, err)
	}
	if len(tags) == 0 {
		return fmt.Errorf("no tags to push for %s", localName)
	}

	for _, tag := range tags {
		if err := p.pushV2Tag(tag); err != nil {
			return err
		}
	}

	return nil
}
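
// pushV2Tag walks the layer chain of the image the tag points to, uploads any
// layer blobs missing from the remote repository, and then signs and uploads
// a schema 1 manifest describing the tag.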
func (p *v2Pusher) pushV2Tag(tag string) error {
	logrus.Debugf("Pushing repository: %s:%s", p.repo.Name(), tag)

	layerID, exists := p.localRepo[tag]
	if !exists {
		return fmt.Errorf("tag does not exist: %s", tag)
	}

	layersSeen := make(map[string]bool)

	layer, err := p.graph.Get(layerID)
	if err != nil {
		return err
	}

	m := &manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         p.repo.Name(),
		Tag:          tag,
		Architecture: layer.Architecture,
		FSLayers:     []manifest.FSLayer{},
		History:      []manifest.History{},
	}

	var metadata runconfig.Config
	if layer != nil && layer.Config != nil {
		metadata = *layer.Config
	}

	out := p.config.OutStream
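
	// Walk the chain from the tagged image down to the base layer via each
	// layer's parent, appending one FSLayer/History pair per layer to the
	// manifest as we go.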
	for ; layer != nil; layer, err = p.graph.GetParent(layer) {
		if err != nil {
			return err
		}

		// Break early if this layer has already been seen in this image;
		// this prevents infinite loops on layers which loop back, which
		// cannot be ruled out since layer IDs are not merkle hashes.
		// TODO(dmcgowan): throw error if no valid use case is found
		if layersSeen[layer.ID] {
			break
		}

		logrus.Debugf("Pushing layer: %s", layer.ID)

		if layer.Config != nil && metadata.Image != layer.ID {
			if err := runconfig.Merge(&metadata, layer.Config); err != nil {
				return err
			}
		}

		jsonData, err := p.graph.RawJSON(layer.ID)
		if err != nil {
			return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err)
		}
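
		// Determine whether this layer's blob already exists on the remote,
		// first from the layersPushed cache and then by statting the blob.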
		var exists bool
		dgst, err := p.graph.GetDigest(layer.ID)
		switch err {
		case nil:
			if p.layersPushed[dgst] {
				exists = true
				// break out of switch, it is already known that
				// the push is not needed and therefore doing a
				// stat is unnecessary
				break
			}
			_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
			switch err {
			case nil:
				exists = true
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
			case distribution.ErrBlobUnknown:
				// nop
			default:
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
				return err
			}
		case ErrDigestNotSet:
			// nop
		case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported:
			return fmt.Errorf("error getting image checksum: %v", err)
		}

		// if the digest was empty or not saved, or if the blob does not exist
		// on the remote repository, then push the layer.
		if !exists {
			if pushDigest, err := p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil {
				return err
			} else if pushDigest != dgst {
				// Cache new checksum
				if err := p.graph.SetDigest(layer.ID, pushDigest); err != nil {
					return err
				}
				dgst = pushDigest
			}
		}

		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})

		layersSeen[layer.ID] = true
		p.layersPushed[dgst] = true
	}
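
	// Sign the manifest with the daemon's trust key and upload it to the
	// repository's manifest service.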
	logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
	signed, err := manifest.Sign(m, p.trustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name())
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tag, manifestDigest, manifestSize))
	}

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return err
	}
	return manSvc.Put(signed)
}
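
// pushV2Image pushes the image content (its layer tar) to the blob store of
// the target repository and returns the canonical digest of the uploaded blob.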
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))

	image, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(image)
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()
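
	// Compute the canonical digest of the layer tar as it streams to the
	// registry, and wrap the reader so progress is reported to the client.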
	digester := digest.Canonical.New()
	tee := io.TeeReader(arch, digester.Hash())

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(tee), // we'll take care of close here.
		Out:       out,
		Formatter: p.sf,

		// TODO(stevvooe): This may cause a size reporting error. Try to get
		// this from tar-split or elsewhere. The main issue here is that we
		// don't want to buffer to disk *just* to calculate the size.
		Size: img.Size,

		NewLines: false,
		ID:       stringid.TruncateID(img.ID),
		Action:   "Pushing",
	})

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
	nn, err := io.Copy(layerUpload, reader)
	if err != nil {
		return "", err
	}
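
	// Commit the upload with the digest computed while streaming; the registry
	// verifies that the uploaded content matches this digest.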
	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))

	return dgst, nil
}