github.com/robertojrojas/docker@v1.9.1/graph/push_v2.go

package graph

import (
	"bufio"
	"compress/gzip"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/utils"
	"golang.org/x/net/context"
)

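// compressionBufSize is the size of the buffer placed between the gzip
// compressor and the upload pipe, avoiding excessive chunking in the HTTP
// request.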
const compressionBufSize = 32768

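// v2Pusher pushes a local repository to a registry v2 endpoint.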
type v2Pusher struct {
	*TagStore
	endpoint  registry.APIEndpoint
	localRepo Repository
	repoInfo  *registry.RepositoryInfo
	config    *ImagePushConfig
	sf        *streamformatter.StreamFormatter
	repo      distribution.Repository

	// layersPushed is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers.
	layersPushed map[digest.Digest]bool
}

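// Push pushes the requested tag(s) to the registry's v2 endpoint. The
// returned fallback flag is true when the v2 repository could not be set
// up, signaling the caller to retry against a v1 endpoint.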
func (p *v2Pusher) Push() (fallback bool, err error) {
	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return true, err
	}
	return false, p.pushV2Repository(p.config.Tag)
}

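// getImageTags returns the tags to push: just askedTag when one is given
// (and exists locally as a plain tag), or every non-digest tag in the local
// repository otherwise.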
func (p *v2Pusher) getImageTags(askedTag string) ([]string, error) {
	logrus.Debugf("Checking %q against %#v", askedTag, p.localRepo)
	if len(askedTag) > 0 {
		if _, ok := p.localRepo[askedTag]; !ok || utils.DigestReference(askedTag) {
			return nil, fmt.Errorf("Tag does not exist for %s", askedTag)
		}
		return []string{askedTag}, nil
	}
	var tags []string
	for tag := range p.localRepo {
		if !utils.DigestReference(tag) {
			tags = append(tags, tag)
		}
	}
	return tags, nil
}

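// pushV2Repository pushes each requested tag of the repository in turn,
// guarding against a concurrent push or pull of the same repository.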
func (p *v2Pusher) pushV2Repository(tag string) error {
	localName := p.repoInfo.LocalName
	if _, found := p.poolAdd("push", localName); found {
		return fmt.Errorf("push or pull %s is already in progress", localName)
	}
	defer p.poolRemove("push", localName)

	tags, err := p.getImageTags(tag)
	if err != nil {
		return fmt.Errorf("error getting tags for %s: %s", localName, err)
	}
	if len(tags) == 0 {
		return fmt.Errorf("no tags to push for %s", localName)
	}

	for _, tag := range tags {
		if err := p.pushV2Tag(tag); err != nil {
			return err
		}
	}

	return nil
}

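// pushV2Tag walks the layer chain of the image referenced by tag, pushing
// any layer the registry does not already have, then builds, signs (with
// the daemon's trust key), and uploads a schema1 manifest for the tag.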
func (p *v2Pusher) pushV2Tag(tag string) error {
	logrus.Debugf("Pushing repository: %s:%s", p.repo.Name(), tag)

	layerID, exists := p.localRepo[tag]
	if !exists {
		return fmt.Errorf("tag does not exist: %s", tag)
	}

	layersSeen := make(map[string]bool)

	layer, err := p.graph.Get(layerID)
	if err != nil {
		return err
	}

	m := &manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         p.repo.Name(),
		Tag:          tag,
		Architecture: layer.Architecture,
		FSLayers:     []manifest.FSLayer{},
		History:      []manifest.History{},
	}

	var metadata runconfig.Config
	if layer != nil && layer.Config != nil {
		metadata = *layer.Config
	}

	out := p.config.OutStream

	for ; layer != nil; layer, err = p.graph.GetParent(layer) {
		if err != nil {
			return err
		}

		// Break early if this layer has already been seen in this
		// image; this prevents infinite loops on layers which loop
		// back, which cannot be ruled out since layer IDs are not
		// Merkle hashes.
		// TODO(dmcgowan): throw error if no valid use case is found
		if layersSeen[layer.ID] {
			break
		}

		logrus.Debugf("Pushing layer: %s", layer.ID)

		if layer.Config != nil && metadata.Image != layer.ID {
			if err := runconfig.Merge(&metadata, layer.Config); err != nil {
				return err
			}
		}

		var exists bool
		dgst, err := p.graph.GetLayerDigest(layer.ID)
		switch err {
		case nil:
			if p.layersPushed[dgst] {
				exists = true
				// break out of switch, it is already known that
				// the push is not needed and therefore doing a
				// stat is unnecessary
				break
			}
			_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
			switch err {
			case nil:
				exists = true
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
			case distribution.ErrBlobUnknown:
				// nop
			default:
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
				return err
			}
		case ErrDigestNotSet:
			// nop
		case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported:
			return fmt.Errorf("error getting image checksum: %v", err)
		}

		// If the digest was empty or not saved, or if the blob does not
		// exist on the remote repository, then push it.
		if !exists {
			var pushDigest digest.Digest
			if pushDigest, err = p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil {
				return err
			}
			if dgst == "" {
				// Cache new checksum
				if err := p.graph.SetLayerDigest(layer.ID, pushDigest); err != nil {
					return err
				}
			}
			dgst = pushDigest
		}

		// read v1Compatibility config, generate new if needed
		jsonData, err := p.graph.GenerateV1CompatibilityChain(layer.ID)
		if err != nil {
			return err
		}

		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})

		layersSeen[layer.ID] = true
		p.layersPushed[dgst] = true
	}

	// Fix parent chain if necessary
	if err = fixHistory(m); err != nil {
		return err
	}

	logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
	signed, err := manifest.Sign(m, p.trustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name())
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tag, manifestDigest, manifestSize))
	}

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return err
	}
	return manSvc.Put(signed)
}

// fixHistory makes sure that the manifest has parent IDs that are consistent
// with its image IDs. Because local image IDs are generated from the
// configuration and filesystem contents, but IDs in the manifest are preserved
// from the original pull, it's possible to have inconsistencies where parent
// IDs don't match up with the other IDs in the manifest. This happens when an
// engine pulls images that are identical except for the IDs in the manifest -
// the local ID will be the same, and one of the v1Compatibility files gets
// discarded.
func fixHistory(m *manifest.Manifest) error {
	var lastID string

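	// m.History is ordered from the most recent layer down to the base
	// layer, so walk it from the end: each entry's "parent" must equal the
	// ID of the entry below it in the chain (lastID, set in the previous
	// iteration), and the base entry must have no parent at all.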
	for i := len(m.History) - 1; i >= 0; i-- {
		var historyEntry map[string]*json.RawMessage
		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &historyEntry); err != nil {
			return err
		}

		idJSON, present := historyEntry["id"]
		if !present || idJSON == nil {
			return errors.New("missing id key in v1compatibility file")
		}
		var id string
		if err := json.Unmarshal(*idJSON, &id); err != nil {
			return err
		}

		parentJSON, present := historyEntry["parent"]

		if i == len(m.History)-1 {
			// The base layer must not reference a parent layer,
			// otherwise the manifest is incomplete. There is an
			// exception for Windows to handle base layers.
			if present && parentJSON != nil {
				var parent string
				if err := json.Unmarshal(*parentJSON, &parent); err != nil {
					return err
				}
				if parent != "" {
					logrus.Debugf("parent id mismatch detected; fixing. parent reference: %s", parent)
					delete(historyEntry, "parent")
					fixedHistory, err := json.Marshal(historyEntry)
					if err != nil {
						return err
					}
					m.History[i].V1Compatibility = string(fixedHistory)
				}
			}
		} else {
			// For all other layers, the parent ID should equal the
			// ID of the next item in the history list. If it
			// doesn't, fix it up (but preserve all other fields,
			// possibly including fields that aren't known to this
			// engine version).
			if !present || parentJSON == nil {
				return errors.New("missing parent key in v1compatibility file")
			}
			var parent string
			if err := json.Unmarshal(*parentJSON, &parent); err != nil {
				return err
			}
			if parent != lastID {
				logrus.Debugf("parent id mismatch detected; fixing. parent reference: %s actual id: %s", parent, id)
				historyEntry["parent"] = rawJSON(lastID)
				fixedHistory, err := json.Marshal(historyEntry)
				if err != nil {
					return err
				}
				m.History[i].V1Compatibility = string(fixedHistory)
			}
		}
		lastID = id
	}

	return nil
}

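// rawJSON marshals value into a *json.RawMessage suitable for insertion
// into a decoded v1Compatibility map, or nil if marshaling fails.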
func rawJSON(value interface{}) *json.RawMessage {
	jsonval, err := json.Marshal(value)
	if err != nil {
		return nil
	}
	return (*json.RawMessage)(&jsonval)
}

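// pushV2Image uploads a single layer to the registry's blob service: it
// tars the layer's filesystem, gzip-compresses the stream while digesting
// the compressed bytes, and commits the blob under the resulting digest,
// which it returns.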
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))

	// Re-fetch the image from the graph; named imgData rather than image
	// to avoid shadowing the image package.
	imgData, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(imgData)
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(arch), // we take care of closing here
		Out:       out,
		Formatter: p.sf,

		// TODO(stevvooe): This may cause a size reporting error. Try to get
		// this from tar-split or elsewhere. The main issue here is that we
		// don't want to buffer to disk *just* to calculate the size.
		Size: img.Size,

		NewLines: false,
		ID:       stringid.TruncateID(img.ID),
		Action:   "Pushing",
	})

	digester := digest.Canonical.New()
	// HACK: The MultiWriter doesn't write directly to layerUpload because
	// we must make sure the ReadFrom is used, not Write. Using Write would
	// send a PATCH request for every Write call.
	pipeReader, pipeWriter := io.Pipe()
	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
	bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, digester.Hash()), compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

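	// Compress in a goroutine: tar stream -> gzip -> bufio -> pipe. The
	// digester hashes the compressed bytes as they pass through the
	// MultiWriter, so the digest committed below matches the uploaded blob.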
	go func() {
		_, err := io.Copy(compressor, reader)
		if err == nil {
			err = compressor.Close()
		}
		if err == nil {
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
	}()

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
	nn, err := layerUpload.ReadFrom(pipeReader)
	pipeReader.Close()
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))

	return dgst, nil
}