github.com/damirazo/docker@v1.9.0/graph/push_v2.go

package graph

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/utils"
	"golang.org/x/net/context"
)

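// compressionBufSize is the size of the buffer used when gzip-compressing
// layer data for upload, chosen to avoid excessive small writes into the
// HTTP pipe (see pushV2Image below).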
const compressionBufSize = 32768

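// v2Pusher pushes a local repository's tags and layers to a registry
// endpoint that speaks the v2 protocol.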
type v2Pusher struct {
	*TagStore
	endpoint  registry.APIEndpoint
	localRepo Repository
	repoInfo  *registry.RepositoryInfo
	config    *ImagePushConfig
	sf        *streamformatter.StreamFormatter
	repo      distribution.Repository

	// layersPushed is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers.
	layersPushed map[digest.Digest]bool
}

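// Push pushes the requested tag (or, when none was requested, all local
// non-digest tags) to the registry. The returned fallback flag reports
// whether the caller may retry with an older protocol: it is true only
// when the v2 repository endpoint itself could not be set up.
//
// A minimal usage sketch (hypothetical caller; in practice the TagStore's
// push machinery populates these fields, including a non-nil layersPushed
// map):
//
//	pusher := &v2Pusher{ /* TagStore, endpoint, localRepo, repoInfo, config, sf, layersPushed */ }
//	fallback, err := pusher.Push()
//	if err != nil && fallback {
//		// retry the push against an older-protocol endpoint
//	}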
func (p *v2Pusher) Push() (fallback bool, err error) {
	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return true, err
	}
	return false, p.pushV2Repository(p.config.Tag)
}

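// getImageTags returns the tags to push: just askedTag when one was given
// (failing if it is unknown locally or is a digest reference), otherwise
// every non-digest tag in the local repository.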
func (p *v2Pusher) getImageTags(askedTag string) ([]string, error) {
	logrus.Debugf("Checking %q against %#v", askedTag, p.localRepo)
	if len(askedTag) > 0 {
		if _, ok := p.localRepo[askedTag]; !ok || utils.DigestReference(askedTag) {
			return nil, fmt.Errorf("tag does not exist for %s", askedTag)
		}
		return []string{askedTag}, nil
	}
	var tags []string
	for tag := range p.localRepo {
		if !utils.DigestReference(tag) {
			tags = append(tags, tag)
		}
	}
	return tags, nil
}

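// pushV2Repository pushes the given tag, or every local tag when tag is
// empty, while holding a pool entry so that concurrent pushes or pulls of
// the same repository are rejected.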
func (p *v2Pusher) pushV2Repository(tag string) error {
	localName := p.repoInfo.LocalName
	if _, found := p.poolAdd("push", localName); found {
		return fmt.Errorf("push or pull %s is already in progress", localName)
	}
	defer p.poolRemove("push", localName)

	tags, err := p.getImageTags(tag)
	if err != nil {
		return fmt.Errorf("error getting tags for %s: %s", localName, err)
	}
	if len(tags) == 0 {
		return fmt.Errorf("no tags to push for %s", localName)
	}

	for _, tag := range tags {
		if err := p.pushV2Tag(tag); err != nil {
			return err
		}
	}

	return nil
}

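// pushV2Tag walks the layer chain for the given tag from the tagged image
// down to the base layer, uploads any blob the registry does not already
// have, and finally signs and uploads a schema-1 manifest assembled along
// the way.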
func (p *v2Pusher) pushV2Tag(tag string) error {
	logrus.Debugf("Pushing repository: %s:%s", p.repo.Name(), tag)

	layerID, exists := p.localRepo[tag]
	if !exists {
		return fmt.Errorf("tag does not exist: %s", tag)
	}

	layersSeen := make(map[string]bool)

	layer, err := p.graph.Get(layerID)
	if err != nil {
		return err
	}

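	// Build a schema-1 manifest up front; FSLayers and History are filled
	// in layer by layer as the chain is walked below.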
	m := &manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         p.repo.Name(),
		Tag:          tag,
		Architecture: layer.Architecture,
		FSLayers:     []manifest.FSLayer{},
		History:      []manifest.History{},
	}

	var metadata runconfig.Config
	if layer != nil && layer.Config != nil {
		metadata = *layer.Config
	}

	out := p.config.OutStream

	for ; layer != nil; layer, err = p.graph.GetParent(layer) {
		if err != nil {
			return err
		}

		// Break early if this layer has already been seen in this image.
		// This prevents infinite loops on layer chains that loop back on
		// themselves, which cannot be ruled out since layer IDs are not
		// Merkle hashes.
		// TODO(dmcgowan): throw an error if no valid use case is found
		if layersSeen[layer.ID] {
			break
		}

		logrus.Debugf("Pushing layer: %s", layer.ID)

		if layer.Config != nil && metadata.Image != layer.ID {
			if err := runconfig.Merge(&metadata, layer.Config); err != nil {
				return err
			}
		}

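		// Determine whether this layer already exists upstream: consult
		// the cached digest and the set of layers pushed earlier in this
		// push, then fall back to a Stat against the registry.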
		var exists bool
		dgst, err := p.graph.GetLayerDigest(layer.ID)
		switch err {
		case nil:
			if p.layersPushed[dgst] {
				exists = true
				// Break out of the switch: we already know the push
				// is not needed, so a stat is unnecessary.
				break
			}
			_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
			switch err {
			case nil:
				exists = true
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
			case distribution.ErrBlobUnknown:
				// nop
			default:
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
				return err
			}
		case ErrDigestNotSet:
			// nop
		case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported:
			return fmt.Errorf("error getting image checksum: %v", err)
		}

		// If the digest was empty or not saved, or if the blob does not
		// exist on the remote repository, then push it.
		if !exists {
			var pushDigest digest.Digest
			if pushDigest, err = p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil {
				return err
			}
			if dgst == "" {
				// Cache new checksum
				if err := p.graph.SetLayerDigest(layer.ID, pushDigest); err != nil {
					return err
				}
			}
			dgst = pushDigest
		}

		// read v1Compatibility config, generate new if needed
		jsonData, err := p.graph.GenerateV1CompatibilityChain(layer.ID)
		if err != nil {
			return err
		}

		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})

		layersSeen[layer.ID] = true
		p.layersPushed[dgst] = true
	}

	logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
	signed, err := manifest.Sign(m, p.trustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name())
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tag, manifestDigest, manifestSize))
	}

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return err
	}
	return manSvc.Put(signed)
}

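// pushV2Image streams a single layer to the blob service: it tars the
// layer's filesystem, gzips it on the fly while digesting the compressed
// bytes, and commits the upload under the resulting digest, which it
// returns.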
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))

	imgData, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(imgData)
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(arch), // we take care of closing arch ourselves (deferred above)
		Out:       out,
		Formatter: p.sf,

		// TODO(stevvooe): This may cause a size reporting error. Try to get
		// this from tar-split or elsewhere. The main issue here is that we
		// don't want to buffer to disk *just* to calculate the size.
		Size: img.Size,

		NewLines: false,
		ID:       stringid.TruncateID(img.ID),
		Action:   "Pushing",
	})

	digester := digest.Canonical.New()
	// HACK: The MultiWriter doesn't write directly to layerUpload because
	// we must make sure that ReadFrom is used, not Write. Using Write would
	// send a PATCH request for every Write call.
	pipeReader, pipeWriter := io.Pipe()
	// Use a bufio.Writer to avoid excessive chunking in the HTTP request.
	bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, digester.Hash()), compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

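	// Compress in a background goroutine so the upload below can consume
	// the pipe concurrently; any compression or flush error is delivered
	// to the reading side via CloseWithError.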
	go func() {
		_, err := io.Copy(compressor, reader)
		if err == nil {
			err = compressor.Close()
		}
		if err == nil {
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
	}()

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
	nn, err := layerUpload.ReadFrom(pipeReader)
	pipeReader.Close()
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))

	return dgst, nil
}