github.com/anuvu/zot@v1.3.4/pkg/storage/s3/storage.go

     1  package s3
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"crypto/sha256"
     7  	"encoding/json"
     8  	"fmt"
     9  	"io"
    10  	"path"
    11  	"path/filepath"
    12  	"strings"
    13  	"sync"
    14  
    15  	"github.com/anuvu/zot/errors"
    16  	"github.com/anuvu/zot/pkg/extensions/monitoring"
    17  	zlog "github.com/anuvu/zot/pkg/log"
    18  	"github.com/anuvu/zot/pkg/storage"
    19  	guuid "github.com/gofrs/uuid"
    20  	"github.com/notaryproject/notation-go-lib"
    21  	godigest "github.com/opencontainers/go-digest"
    22  	ispec "github.com/opencontainers/image-spec/specs-go/v1"
    23  	"github.com/rs/zerolog"
    24  
    25  	// Add s3 support
    26  	storageDriver "github.com/docker/distribution/registry/storage/driver"
    27  	_ "github.com/docker/distribution/registry/storage/driver/s3-aws" // Load s3 driver
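        	// NOTE: imported only for the illustrative newInMemoryImageStore sketch
        	// below; this import is an editorial assumption, not part of the original file.
        	"github.com/docker/distribution/registry/storage/driver/inmemory"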
    28  )
    29  
    30  // ObjectStorage provides the image storage operations.
    31  type ObjectStorage struct {
    32  	rootDir     string
    33  	store       storageDriver.StorageDriver
    34  	lock        *sync.RWMutex
    35  	blobUploads map[string]storage.BlobUpload
    36  	log         zerolog.Logger
     37  	// We must keep track of multipart uploads to S3, because the driver
     38  	// library we use doesn't cancel multipart uploads;
     39  	// see: https://github.com/distribution/distribution/blob/main/registry/storage/driver/s3-aws/s3.go#L545
    40  	isMultiPartUpload map[string]bool
    41  	metrics           monitoring.MetricServer
    42  }
    43  
    44  func (is *ObjectStorage) RootDir() string {
    45  	return is.rootDir
    46  }
    47  
    48  func (is *ObjectStorage) DirExists(d string) bool {
    49  	if fi, err := is.store.Stat(context.Background(), d); err == nil && fi.IsDir() {
    50  		return true
    51  	}
    52  
    53  	return false
    54  }
    55  
    56  // NewImageStore returns a new image store backed by cloud storage.
    57  // see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
    58  func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger, m monitoring.MetricServer,
    59  	store storageDriver.StorageDriver) storage.ImageStore {
    60  	is := &ObjectStorage{
    61  		rootDir:           rootDir,
    62  		store:             store,
    63  		lock:              &sync.RWMutex{},
    64  		blobUploads:       make(map[string]storage.BlobUpload),
    65  		log:               log.With().Caller().Logger(),
    66  		isMultiPartUpload: make(map[string]bool),
    67  		metrics:           m,
    68  	}
    69  
    70  	return is
    71  }
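
        // newInMemoryImageStore is an illustrative sketch, not part of zot: it shows how
        // a test might wire NewImageStore to distribution's in-memory driver instead of a
        // real S3 bucket. The "/registry" root prefix is a hypothetical value.
        func newInMemoryImageStore(log zlog.Logger, m monitoring.MetricServer) storage.ImageStore {
        	// inmemory.New() keeps all objects in RAM, which is convenient for exercising
        	// the ObjectStorage code paths without S3 credentials. gc and dedupe are
        	// accepted for interface parity but are not used by this object-storage backend.
        	return NewImageStore("/registry", false, false, log, m, inmemory.New())
        }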
    72  
    73  // RLock read-lock.
    74  func (is *ObjectStorage) RLock() {
    75  	is.lock.RLock()
    76  }
    77  
    78  // RUnlock read-unlock.
    79  func (is *ObjectStorage) RUnlock() {
    80  	is.lock.RUnlock()
    81  }
    82  
    83  // Lock write-lock.
    84  func (is *ObjectStorage) Lock() {
    85  	is.lock.Lock()
    86  }
    87  
    88  // Unlock write-unlock.
    89  func (is *ObjectStorage) Unlock() {
    90  	is.lock.Unlock()
    91  }
    92  
    93  func (is *ObjectStorage) initRepo(name string) error {
    94  	repoDir := path.Join(is.rootDir, name)
    95  
    96  	if fi, err := is.store.Stat(context.Background(), repoDir); err == nil && fi.IsDir() {
    97  		return nil
    98  	}
    99  
   100  	// "oci-layout" file - create if it doesn't exist
   101  	ilPath := path.Join(repoDir, ispec.ImageLayoutFile)
   102  	if _, err := is.store.Stat(context.Background(), ilPath); err != nil {
   103  		il := ispec.ImageLayout{Version: ispec.ImageLayoutVersion}
   104  		buf, err := json.Marshal(il)
   105  
   106  		if err != nil {
   107  			is.log.Error().Err(err).Msg("unable to marshal JSON")
   108  			return err
   109  		}
   110  
   111  		if _, err := writeFile(is.store, ilPath, buf); err != nil {
   112  			is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file")
   113  			return err
   114  		}
   115  	}
   116  
   117  	// "index.json" file - create if it doesn't exist
   118  	indexPath := path.Join(repoDir, "index.json")
   119  	if _, err := is.store.Stat(context.Background(), indexPath); err != nil {
   120  		index := ispec.Index{}
   121  		index.SchemaVersion = 2
   122  		buf, err := json.Marshal(index)
   123  
   124  		if err != nil {
   125  			is.log.Error().Err(err).Msg("unable to marshal JSON")
   126  			return err
   127  		}
   128  
   129  		if _, err := writeFile(is.store, indexPath, buf); err != nil {
    130  			is.log.Error().Err(err).Str("file", indexPath).Msg("unable to write file")
   131  			return err
   132  		}
   133  	}
   134  
   135  	return nil
   136  }
   137  
   138  // InitRepo creates an image repository under this store.
   139  func (is *ObjectStorage) InitRepo(name string) error {
   140  	is.Lock()
   141  	defer is.Unlock()
   142  
   143  	return is.initRepo(name)
   144  }
   145  
    146  // ValidateRepo validates that the repository layout is compliant with the OCI repo layout.
   147  func (is *ObjectStorage) ValidateRepo(name string) (bool, error) {
   148  	// https://github.com/opencontainers/image-spec/blob/master/image-layout.md#content
    149  	// expect at least 3 entries - ["blobs", "oci-layout", "index.json"]
    150  	// and an additional/optional BlobUploadDir in each image store
    151  	// for object storage we cannot create empty dirs, so we check only against index.json and oci-layout
   152  	dir := path.Join(is.rootDir, name)
   153  	if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
   154  		return false, errors.ErrRepoNotFound
   155  	}
   156  
   157  	files, err := is.store.List(context.Background(), dir)
   158  	if err != nil {
   159  		is.log.Error().Err(err).Str("dir", dir).Msg("unable to read directory")
   160  		return false, errors.ErrRepoNotFound
   161  	}
   162  
   163  	// nolint:gomnd
   164  	if len(files) < 2 {
   165  		return false, errors.ErrRepoBadVersion
   166  	}
   167  
   168  	found := map[string]bool{
   169  		ispec.ImageLayoutFile: false,
   170  		"index.json":          false,
   171  	}
   172  
   173  	for _, file := range files {
   174  		f, err := is.store.Stat(context.Background(), file)
   175  		if err != nil {
   176  			return false, err
   177  		}
   178  
   179  		if strings.HasSuffix(file, "blobs") && !f.IsDir() {
   180  			return false, nil
   181  		}
   182  
   183  		filename, err := filepath.Rel(dir, file)
   184  		if err != nil {
   185  			return false, err
   186  		}
   187  
   188  		found[filename] = true
   189  	}
   190  
   191  	for k, v := range found {
   192  		if !v && k != storage.BlobUploadDir {
   193  			return false, nil
   194  		}
   195  	}
   196  
   197  	buf, err := is.store.GetContent(context.Background(), path.Join(dir, ispec.ImageLayoutFile))
   198  	if err != nil {
   199  		return false, err
   200  	}
   201  
   202  	var il ispec.ImageLayout
   203  	if err := json.Unmarshal(buf, &il); err != nil {
   204  		return false, err
   205  	}
   206  
   207  	if il.Version != ispec.ImageLayoutVersion {
   208  		return false, errors.ErrRepoBadVersion
   209  	}
   210  
   211  	return true, nil
   212  }
   213  
   214  // GetRepositories returns a list of all the repositories under this store.
   215  func (is *ObjectStorage) GetRepositories() ([]string, error) {
   216  	dir := is.rootDir
   217  
   218  	is.RLock()
   219  	defer is.RUnlock()
   220  
   221  	stores := make([]string, 0)
   222  	err := is.store.Walk(context.Background(), dir, func(fileInfo storageDriver.FileInfo) error {
   223  		if !fileInfo.IsDir() {
   224  			return nil
   225  		}
   226  
   227  		rel, err := filepath.Rel(is.rootDir, fileInfo.Path())
   228  		if err != nil {
   229  			return nil
   230  		}
   231  
   232  		if ok, err := is.ValidateRepo(rel); !ok || err != nil {
   233  			return nil
   234  		}
   235  
   236  		stores = append(stores, rel)
   237  
   238  		return nil
   239  	})
   240  
   241  	// if the root directory is not yet created then return an empty slice of repositories
   242  	_, ok := err.(storageDriver.PathNotFoundError)
   243  	if ok {
   244  		return stores, nil
   245  	}
   246  
   247  	return stores, err
   248  }
   249  
   250  // GetImageTags returns a list of image tags available in the specified repository.
   251  func (is *ObjectStorage) GetImageTags(repo string) ([]string, error) {
   252  	dir := path.Join(is.rootDir, repo)
   253  	if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
   254  		return nil, errors.ErrRepoNotFound
   255  	}
   256  
   257  	is.RLock()
   258  	defer is.RUnlock()
   259  
   260  	buf, err := is.GetIndexContent(repo)
   261  	if err != nil {
   262  		return nil, err
   263  	}
   264  
   265  	var index ispec.Index
   266  	if err := json.Unmarshal(buf, &index); err != nil {
   267  		is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
   268  		return nil, errors.ErrRepoNotFound
   269  	}
   270  
   271  	tags := make([]string, 0)
   272  
   273  	for _, manifest := range index.Manifests {
   274  		v, ok := manifest.Annotations[ispec.AnnotationRefName]
   275  		if ok {
   276  			tags = append(tags, v)
   277  		}
   278  	}
   279  
   280  	return tags, nil
   281  }
   282  
   283  // GetImageManifest returns the image manifest of an image in the specific repository.
   284  func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte, string, string, error) {
   285  	dir := path.Join(is.rootDir, repo)
   286  	if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
   287  		return nil, "", "", errors.ErrRepoNotFound
   288  	}
   289  
   290  	is.RLock()
   291  	defer is.RUnlock()
   292  
   293  	buf, err := is.GetIndexContent(repo)
   294  	if err != nil {
   295  		return nil, "", "", err
   296  	}
   297  
   298  	var index ispec.Index
   299  	if err := json.Unmarshal(buf, &index); err != nil {
   300  		is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
   301  		return nil, "", "", err
   302  	}
   303  
   304  	found := false
   305  
   306  	var digest godigest.Digest
   307  
   308  	mediaType := ""
   309  
   310  	for _, m := range index.Manifests {
   311  		if reference == m.Digest.String() {
   312  			digest = m.Digest
   313  			mediaType = m.MediaType
   314  			found = true
   315  
   316  			break
   317  		}
   318  
   319  		v, ok := m.Annotations[ispec.AnnotationRefName]
   320  		if ok && v == reference {
   321  			digest = m.Digest
   322  			mediaType = m.MediaType
   323  			found = true
   324  
   325  			break
   326  		}
   327  	}
   328  
   329  	if !found {
   330  		return nil, "", "", errors.ErrManifestNotFound
   331  	}
   332  
   333  	p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
   334  
   335  	buf, err = is.store.GetContent(context.Background(), p)
   336  	if err != nil {
   337  		is.log.Error().Err(err).Str("blob", p).Msg("failed to read manifest")
   338  		return nil, "", "", err
   339  	}
   340  
   341  	var manifest ispec.Manifest
   342  	if err := json.Unmarshal(buf, &manifest); err != nil {
   343  		is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
   344  		return nil, "", "", err
   345  	}
   346  
   347  	monitoring.IncDownloadCounter(is.metrics, repo)
   348  
   349  	return buf, digest.String(), mediaType, nil
   350  }
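
        // exampleReadConfig is a hedged usage sketch (not part of the zot API) of the read
        // path above: resolve a tag or digest to its manifest via GetImageManifest, then
        // fetch the referenced config blob. The repo and reference values are supplied by
        // the hypothetical caller.
        func exampleReadConfig(is *ObjectStorage, repo, reference string) ([]byte, error) {
        	body, _, _, err := is.GetImageManifest(repo, reference)
        	if err != nil {
        		return nil, err
        	}

        	var manifest ispec.Manifest
        	if err := json.Unmarshal(body, &manifest); err != nil {
        		return nil, err
        	}

        	// GetBlobContent reads the blob addressed by the manifest's config descriptor.
        	return is.GetBlobContent(repo, manifest.Config.Digest.String())
        }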
   351  
   352  // PutImageManifest adds an image manifest to the repository.
   353  func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaType string,
   354  	body []byte) (string, error) {
   355  	if err := is.InitRepo(repo); err != nil {
   356  		is.log.Debug().Err(err).Msg("init repo")
   357  		return "", err
   358  	}
   359  
   360  	if mediaType != ispec.MediaTypeImageManifest {
   361  		is.log.Debug().Interface("actual", mediaType).
   362  			Interface("expected", ispec.MediaTypeImageManifest).Msg("bad manifest media type")
   363  		return "", errors.ErrBadManifest
   364  	}
   365  
   366  	if len(body) == 0 {
   367  		is.log.Debug().Int("len", len(body)).Msg("invalid body length")
   368  		return "", errors.ErrBadManifest
   369  	}
   370  
   371  	var m ispec.Manifest
   372  	if err := json.Unmarshal(body, &m); err != nil {
   373  		is.log.Error().Err(err).Msg("unable to unmarshal JSON")
   374  		return "", errors.ErrBadManifest
   375  	}
   376  
   377  	if m.SchemaVersion != storage.SchemaVersion {
   378  		is.log.Error().Int("SchemaVersion", m.SchemaVersion).Msg("invalid manifest")
   379  		return "", errors.ErrBadManifest
   380  	}
   381  
   382  	for _, l := range m.Layers {
   383  		digest := l.Digest
   384  		blobPath := is.BlobPath(repo, digest)
   385  		is.log.Info().Str("blobPath", blobPath).Str("reference", reference).Msg("manifest layers")
   386  
   387  		if _, err := is.store.Stat(context.Background(), blobPath); err != nil {
   388  			is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to find blob")
   389  			return digest.String(), errors.ErrBlobNotFound
   390  		}
   391  	}
   392  
   393  	mDigest := godigest.FromBytes(body)
   394  	refIsDigest := false
   395  	d, err := godigest.Parse(reference)
   396  
   397  	if err == nil {
   398  		if d.String() != mDigest.String() {
   399  			is.log.Error().Str("actual", mDigest.String()).Str("expected", d.String()).
   400  				Msg("manifest digest is not valid")
   401  			return "", errors.ErrBadManifest
   402  		}
   403  
   404  		refIsDigest = true
   405  	}
   406  
   407  	is.Lock()
   408  	defer is.Unlock()
   409  
   410  	dir := path.Join(is.rootDir, repo)
   411  
   412  	buf, err := is.GetIndexContent(repo)
   413  	if err != nil {
   414  		return "", err
   415  	}
   416  
   417  	var index ispec.Index
   418  	if err := json.Unmarshal(buf, &index); err != nil {
   419  		is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
   420  		return "", errors.ErrRepoBadVersion
   421  	}
   422  
   423  	updateIndex := true
   424  	// create a new descriptor
   425  	desc := ispec.Descriptor{MediaType: mediaType, Size: int64(len(body)), Digest: mDigest,
   426  		Platform: &ispec.Platform{Architecture: "amd64", OS: "linux"}}
   427  	if !refIsDigest {
   428  		desc.Annotations = map[string]string{ispec.AnnotationRefName: reference}
   429  	}
   430  
   431  	for i, m := range index.Manifests {
   432  		if reference == m.Digest.String() {
   433  			// nothing changed, so don't update
   434  			desc = m
   435  			updateIndex = false
   436  
   437  			break
   438  		}
   439  
   440  		v, ok := m.Annotations[ispec.AnnotationRefName]
   441  		if ok && v == reference {
   442  			if m.Digest.String() == mDigest.String() {
   443  				// nothing changed, so don't update
   444  				desc = m
   445  				updateIndex = false
   446  
   447  				break
   448  			}
   449  			// manifest contents have changed for the same tag,
   450  			// so update index.json descriptor
   451  			is.log.Info().
   452  				Int64("old size", desc.Size).
   453  				Int64("new size", int64(len(body))).
   454  				Str("old digest", desc.Digest.String()).
   455  				Str("new digest", mDigest.String()).
   456  				Msg("updating existing tag with new manifest contents")
   457  
   458  			desc = m
   459  			desc.Size = int64(len(body))
   460  			desc.Digest = mDigest
   461  
   462  			index.Manifests = append(index.Manifests[:i], index.Manifests[i+1:]...)
   463  
   464  			break
   465  		}
   466  	}
   467  
   468  	if !updateIndex {
   469  		return desc.Digest.String(), nil
   470  	}
   471  
   472  	// write manifest to "blobs"
   473  	dir = path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
   474  	manifestPath := path.Join(dir, mDigest.Encoded())
   475  
   476  	if err = is.store.PutContent(context.Background(), manifestPath, body); err != nil {
   477  		is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write")
   478  		return "", err
   479  	}
   480  
   481  	// now update "index.json"
   482  	index.Manifests = append(index.Manifests, desc)
   483  	dir = path.Join(is.rootDir, repo)
   484  	indexPath := path.Join(dir, "index.json")
   485  	buf, err = json.Marshal(index)
   486  
   487  	if err != nil {
   488  		is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON")
   489  		return "", err
   490  	}
   491  
   492  	if err = is.store.PutContent(context.Background(), indexPath, buf); err != nil {
    493  		is.log.Error().Err(err).Str("file", indexPath).Msg("unable to write")
   494  		return "", err
   495  	}
   496  
   497  	monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
   498  	monitoring.IncUploadCounter(is.metrics, repo)
   499  
   500  	return desc.Digest.String(), nil
   501  }
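
        // examplePushImage is a hedged illustration (an editorial sketch, not part of the
        // zot API) of the caller-side flow that PutImageManifest expects: every layer blob
        // must already exist under "blobs/" before the manifest referencing it is pushed.
        // The repo name "example/repo" and tag "v1.0.0" are hypothetical.
        func examplePushImage(is *ObjectStorage, configBlob, layerBlob []byte) (string, error) {
        	// Upload the config and layer blobs first; PutImageManifest rejects manifests
        	// whose layer digests cannot be stat'ed in the blob store.
        	configDigest := godigest.FromBytes(configBlob)
        	if _, _, err := is.FullBlobUpload("example/repo", bytes.NewReader(configBlob), configDigest.String()); err != nil {
        		return "", err
        	}

        	layerDigest := godigest.FromBytes(layerBlob)
        	if _, _, err := is.FullBlobUpload("example/repo", bytes.NewReader(layerBlob), layerDigest.String()); err != nil {
        		return "", err
        	}

        	manifest := ispec.Manifest{
        		Config: ispec.Descriptor{
        			MediaType: ispec.MediaTypeImageConfig,
        			Digest:    configDigest,
        			Size:      int64(len(configBlob)),
        		},
        		Layers: []ispec.Descriptor{{
        			MediaType: ispec.MediaTypeImageLayerGzip,
        			Digest:    layerDigest,
        			Size:      int64(len(layerBlob)),
        		}},
        	}
        	manifest.SchemaVersion = storage.SchemaVersion

        	body, err := json.Marshal(manifest)
        	if err != nil {
        		return "", err
        	}

        	// A tagged push: PutImageManifest records the tag as an annotation in index.json.
        	return is.PutImageManifest("example/repo", "v1.0.0", ispec.MediaTypeImageManifest, body)
        }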
   502  
   503  // DeleteImageManifest deletes the image manifest from the repository.
   504  func (is *ObjectStorage) DeleteImageManifest(repo string, reference string) error {
   505  	dir := path.Join(is.rootDir, repo)
   506  	if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
   507  		return errors.ErrRepoNotFound
   508  	}
   509  
   510  	isTag := false
   511  
    512  	// as per spec "reference" should be a digest; if it is not, fall back to treating it as a tag
   513  	digest, err := godigest.Parse(reference)
   514  	if err != nil {
    515  		is.log.Debug().Str("invalid digest", reference).Msg("storage: assuming tag")
   516  
   517  		isTag = true
   518  	}
   519  
   520  	is.Lock()
   521  	defer is.Unlock()
   522  
   523  	buf, err := is.GetIndexContent(repo)
   524  	if err != nil {
   525  		return err
   526  	}
   527  
   528  	var index ispec.Index
   529  	if err := json.Unmarshal(buf, &index); err != nil {
   530  		is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
   531  		return err
   532  	}
   533  
   534  	found := false
   535  
   536  	var m ispec.Descriptor
   537  
   538  	// we are deleting, so keep only those manifests that don't match
   539  	outIndex := index
   540  	outIndex.Manifests = []ispec.Descriptor{}
   541  
   542  	for _, m = range index.Manifests {
   543  		if isTag {
   544  			tag, ok := m.Annotations[ispec.AnnotationRefName]
   545  			if ok && tag == reference {
   546  				is.log.Debug().Str("deleting tag", tag).Msg("")
   547  
   548  				digest = m.Digest
   549  
   550  				found = true
   551  
   552  				continue
   553  			}
   554  		} else if reference == m.Digest.String() {
   555  			is.log.Debug().Str("deleting reference", reference).Msg("")
   556  			found = true
   557  			continue
   558  		}
   559  
   560  		outIndex.Manifests = append(outIndex.Manifests, m)
   561  	}
   562  
   563  	if !found {
   564  		return errors.ErrManifestNotFound
   565  	}
   566  
   567  	// now update "index.json"
   568  	dir = path.Join(is.rootDir, repo)
   569  	file := path.Join(dir, "index.json")
   570  	buf, err = json.Marshal(outIndex)
   571  
   572  	if err != nil {
   573  		return err
   574  	}
   575  
   576  	if _, err := writeFile(is.store, file, buf); err != nil {
    577  		is.log.Error().Err(err).Str("file", file).Msg("unable to write")
   578  		return err
   579  	}
   580  
    581  	// Delete the blob only when its digest is not referenced by any remaining manifest entry.
    582  	// e.g. if 1.0.1 & 1.0.2 share the same manifest digest, deleting 1.0.1 should not remove the blob.
   583  	toDelete := true
   584  
   585  	for _, m = range outIndex.Manifests {
   586  		if digest.String() == m.Digest.String() {
   587  			toDelete = false
   588  			break
   589  		}
   590  	}
   591  
   592  	if toDelete {
   593  		p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
   594  
   595  		err = is.store.Delete(context.Background(), p)
   596  		if err != nil {
   597  			return err
   598  		}
   599  	}
   600  
   601  	monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
   602  
   603  	return nil
   604  }
   605  
   606  // BlobUploadPath returns the upload path for a blob in this store.
   607  func (is *ObjectStorage) BlobUploadPath(repo string, uuid string) string {
   608  	dir := path.Join(is.rootDir, repo)
   609  	blobUploadPath := path.Join(dir, storage.BlobUploadDir, uuid)
   610  
   611  	return blobUploadPath
   612  }
   613  
   614  // NewBlobUpload returns the unique ID for an upload in progress.
   615  func (is *ObjectStorage) NewBlobUpload(repo string) (string, error) {
   616  	if err := is.InitRepo(repo); err != nil {
   617  		is.log.Error().Err(err).Msg("error initializing repo")
   618  
   619  		return "", err
   620  	}
   621  
   622  	uuid, err := guuid.NewV4()
   623  	if err != nil {
   624  		return "", err
   625  	}
   626  
   627  	u := uuid.String()
   628  
   629  	blobUploadPath := is.BlobUploadPath(repo, u)
   630  
    631  	// here we should create an empty multipart upload, but that's not possible,
    632  	// so we just create a regular empty file which will be overwritten by FinishBlobUpload
   633  	err = is.store.PutContent(context.Background(), blobUploadPath, []byte{})
   634  	if err != nil {
   635  		return "", errors.ErrRepoNotFound
   636  	}
   637  
   638  	return u, nil
   639  }
   640  
   641  // GetBlobUpload returns the current size of a blob upload.
   642  func (is *ObjectStorage) GetBlobUpload(repo string, uuid string) (int64, error) {
   643  	var fileSize int64
   644  
   645  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   646  
    647  	// if it's not a multipart upload, check for the regular empty file
    648  	// created by NewBlobUpload; it should always have size 0
   649  	isMultiPartStarted, ok := is.isMultiPartUpload[blobUploadPath]
   650  	if !isMultiPartStarted || !ok {
   651  		fi, err := is.store.Stat(context.Background(), blobUploadPath)
   652  		if err != nil {
   653  			_, ok := err.(storageDriver.PathNotFoundError)
   654  			if ok {
   655  				return -1, errors.ErrUploadNotFound
   656  			}
   657  
   658  			return -1, err
   659  		}
   660  
   661  		fileSize = fi.Size()
   662  	} else {
    663  		// otherwise get the size of the multipart upload
   664  		fi, err := getMultipartFileWriter(is, blobUploadPath)
   665  		if err != nil {
   666  			return -1, err
   667  		}
   668  
   669  		fileSize = fi.Size()
   670  	}
   671  
   672  	return fileSize, nil
   673  }
   674  
    675  // PutBlobChunkStreamed appends another chunk of data to the specified blob. It returns
    676  // the number of bytes actually written to the blob.
   677  func (is *ObjectStorage) PutBlobChunkStreamed(repo string, uuid string, body io.Reader) (int64, error) {
   678  	if err := is.InitRepo(repo); err != nil {
   679  		return -1, err
   680  	}
   681  
   682  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   683  
   684  	_, err := is.store.Stat(context.Background(), blobUploadPath)
   685  	if err != nil {
   686  		return -1, errors.ErrUploadNotFound
   687  	}
   688  
   689  	file, err := getMultipartFileWriter(is, blobUploadPath)
   690  	if err != nil {
   691  		is.log.Error().Err(err).Msg("failed to create multipart upload")
   692  		return -1, err
   693  	}
   694  
   695  	defer file.Close()
   696  
   697  	buf := new(bytes.Buffer)
   698  
   699  	_, err = buf.ReadFrom(body)
   700  	if err != nil {
   701  		is.log.Error().Err(err).Msg("failed to read blob")
   702  		return -1, err
   703  	}
   704  
   705  	n, err := file.Write(buf.Bytes())
   706  	if err != nil {
   707  		is.log.Error().Err(err).Msg("failed to append to file")
   708  		return -1, err
   709  	}
   710  
   711  	return int64(n), err
   712  }
   713  
    714  // PutBlobChunk writes another chunk of data to the specified blob. It returns
    715  // the number of bytes actually written to the blob.
   716  func (is *ObjectStorage) PutBlobChunk(repo string, uuid string, from int64, to int64,
   717  	body io.Reader) (int64, error) {
   718  	if err := is.InitRepo(repo); err != nil {
   719  		return -1, err
   720  	}
   721  
   722  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   723  
   724  	_, err := is.store.Stat(context.Background(), blobUploadPath)
   725  	if err != nil {
   726  		return -1, errors.ErrUploadNotFound
   727  	}
   728  
   729  	file, err := getMultipartFileWriter(is, blobUploadPath)
   730  	if err != nil {
   731  		is.log.Error().Err(err).Msg("failed to create multipart upload")
   732  		return -1, err
   733  	}
   734  
   735  	defer file.Close()
   736  
   737  	if from != file.Size() {
   738  		// cancel multipart upload created earlier
   739  		err := file.Cancel()
   740  		if err != nil {
   741  			is.log.Error().Err(err).Msg("failed to cancel multipart upload")
   742  			return -1, err
   743  		}
   744  
   745  		is.log.Error().Int64("expected", from).Int64("actual", file.Size()).
   746  			Msg("invalid range start for blob upload")
   747  
   748  		return -1, errors.ErrBadUploadRange
   749  	}
   750  
   751  	buf := new(bytes.Buffer)
   752  
   753  	_, err = buf.ReadFrom(body)
   754  	if err != nil {
   755  		is.log.Error().Err(err).Msg("failed to read blob")
   756  		return -1, err
   757  	}
   758  
   759  	n, err := file.Write(buf.Bytes())
   760  	if err != nil {
   761  		is.log.Error().Err(err).Msg("failed to append to file")
   762  		return -1, err
   763  	}
   764  
   765  	is.isMultiPartUpload[blobUploadPath] = true
   766  
   767  	return int64(n), err
   768  }
   769  
   770  // BlobUploadInfo returns the current blob size in bytes.
   771  func (is *ObjectStorage) BlobUploadInfo(repo string, uuid string) (int64, error) {
   772  	var fileSize int64
   773  
   774  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   775  
    776  	// if it's not a multipart upload, check for the regular empty file
    777  	// created by NewBlobUpload; it should always have size 0
   778  	isMultiPartStarted, ok := is.isMultiPartUpload[blobUploadPath]
   779  	if !isMultiPartStarted || !ok {
   780  		fi, err := is.store.Stat(context.Background(), blobUploadPath)
   781  		if err != nil {
   782  			is.log.Error().Err(err).Str("blob", blobUploadPath).Msg("failed to stat blob")
   783  			return -1, err
   784  		}
   785  
   786  		fileSize = fi.Size()
   787  	} else {
    788  		// otherwise get the size of the multipart upload
   789  		fi, err := getMultipartFileWriter(is, blobUploadPath)
   790  		if err != nil {
   791  			is.log.Error().Err(err).Str("blob", blobUploadPath).Msg("failed to stat blob")
   792  			return -1, err
   793  		}
   794  
   795  		fileSize = fi.Size()
   796  	}
   797  
   798  	return fileSize, nil
   799  }
   800  
    801  // FinishBlobUpload finalizes the blob upload and moves the blob into the repository.
   802  func (is *ObjectStorage) FinishBlobUpload(repo string, uuid string, body io.Reader, digest string) error {
   803  	dstDigest, err := godigest.Parse(digest)
   804  	if err != nil {
   805  		is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
   806  		return errors.ErrBadBlobDigest
   807  	}
   808  
   809  	src := is.BlobUploadPath(repo, uuid)
   810  
    811  	// complete the multipart upload
   812  	fileWriter, err := is.store.Writer(context.Background(), src, true)
   813  	if err != nil {
   814  		is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
   815  		return errors.ErrBadBlobDigest
   816  	}
   817  
   818  	if err := fileWriter.Commit(); err != nil {
   819  		is.log.Error().Err(err).Msg("failed to commit file")
   820  		return err
   821  	}
   822  
   823  	if err := fileWriter.Close(); err != nil {
   824  		is.log.Error().Err(err).Msg("failed to close file")
   825  	}
   826  
   827  	fileReader, err := is.store.Reader(context.Background(), src, 0)
   828  	if err != nil {
   829  		is.log.Error().Err(err).Str("blob", src).Msg("failed to open file")
   830  		return errors.ErrUploadNotFound
   831  	}
   832  
   833  	srcDigest, err := godigest.FromReader(fileReader)
   834  	if err != nil {
   835  		is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
   836  		return errors.ErrBadBlobDigest
   837  	}
   838  
   839  	if srcDigest != dstDigest {
   840  		is.log.Error().Str("srcDigest", srcDigest.String()).
   841  			Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest")
   842  		return errors.ErrBadBlobDigest
   843  	}
   844  
   845  	fileReader.Close()
   846  
   847  	dst := is.BlobPath(repo, dstDigest)
   848  
   849  	if err := is.store.Move(context.Background(), src, dst); err != nil {
   850  		is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
   851  			Str("dst", dst).Msg("unable to finish blob")
   852  		return err
   853  	}
   854  
   855  	// remove multipart upload, not needed anymore
   856  	delete(is.isMultiPartUpload, src)
   857  
   858  	return nil
   859  }
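
        // exampleStreamedUpload is a hedged sketch (not part of the zot API) of the
        // session-based counterpart to FullBlobUpload: open an upload session, stream the
        // payload, then finalize it so FinishBlobUpload can verify the digest and move the
        // blob into "blobs/". The repo name "example/repo" is hypothetical.
        func exampleStreamedUpload(is *ObjectStorage, payload []byte) error {
        	uuid, err := is.NewBlobUpload("example/repo")
        	if err != nil {
        		return err
        	}

        	if _, err := is.PutBlobChunkStreamed("example/repo", uuid, bytes.NewReader(payload)); err != nil {
        		// Abandon the session so the temporary upload file does not linger.
        		_ = is.DeleteBlobUpload("example/repo", uuid)

        		return err
        	}

        	expected := godigest.FromBytes(payload)

        	return is.FinishBlobUpload("example/repo", uuid, bytes.NewReader(payload), expected.String())
        }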
   860  
   861  // FullBlobUpload handles a full blob upload, and no partial session is created.
   862  func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, digest string) (string, int64, error) {
   863  	if err := is.InitRepo(repo); err != nil {
   864  		return "", -1, err
   865  	}
   866  
   867  	dstDigest, err := godigest.Parse(digest)
   868  	if err != nil {
   869  		is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
   870  		return "", -1, errors.ErrBadBlobDigest
   871  	}
   872  
   873  	u, err := guuid.NewV4()
   874  	if err != nil {
   875  		return "", -1, err
   876  	}
   877  
   878  	uuid := u.String()
   879  
   880  	src := is.BlobUploadPath(repo, uuid)
   881  
   882  	digester := sha256.New()
   883  
   884  	buf := new(bytes.Buffer)
   885  
   886  	_, err = buf.ReadFrom(body)
   887  	if err != nil {
   888  		is.log.Error().Err(err).Msg("failed to read blob")
   889  		return "", -1, err
   890  	}
   891  
   892  	n, err := writeFile(is.store, src, buf.Bytes())
   893  	if err != nil {
   894  		is.log.Error().Err(err).Msg("failed to write blob")
   895  		return "", -1, err
   896  	}
   897  
   898  	_, err = digester.Write(buf.Bytes())
   899  	if err != nil {
   900  		is.log.Error().Err(err).Msg("digester failed to write")
   901  		return "", -1, err
   902  	}
   903  
   904  	srcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf("%x", digester.Sum(nil)))
   905  	if srcDigest != dstDigest {
   906  		is.log.Error().Str("srcDigest", srcDigest.String()).
   907  			Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest")
   908  		return "", -1, errors.ErrBadBlobDigest
   909  	}
   910  
   911  	is.Lock()
   912  	defer is.Unlock()
   913  
   914  	dst := is.BlobPath(repo, dstDigest)
   915  
   916  	if err := is.store.Move(context.Background(), src, dst); err != nil {
   917  		is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
   918  			Str("dst", dst).Msg("unable to finish blob")
   919  		return "", -1, err
   920  	}
   921  
   922  	return uuid, int64(n), nil
   923  }
   924  
   925  func (is *ObjectStorage) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error {
   926  	return nil
   927  }
   928  
   929  // DeleteBlobUpload deletes an existing blob upload that is currently in progress.
   930  func (is *ObjectStorage) DeleteBlobUpload(repo string, uuid string) error {
   931  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   932  	if err := is.store.Delete(context.Background(), blobUploadPath); err != nil {
   933  		is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload")
   934  		return err
   935  	}
   936  
   937  	return nil
   938  }
   939  
   940  // BlobPath returns the repository path of a blob.
   941  func (is *ObjectStorage) BlobPath(repo string, digest godigest.Digest) string {
   942  	return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded())
   943  }
   944  
   945  // CheckBlob verifies a blob and returns true if the blob is correct.
   946  func (is *ObjectStorage) CheckBlob(repo string, digest string) (bool, int64, error) {
   947  	d, err := godigest.Parse(digest)
   948  	if err != nil {
   949  		is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
   950  		return false, -1, errors.ErrBadBlobDigest
   951  	}
   952  
   953  	blobPath := is.BlobPath(repo, d)
   954  
   955  	is.RLock()
   956  	defer is.RUnlock()
   957  
   958  	blobInfo, err := is.store.Stat(context.Background(), blobPath)
   959  	if err != nil {
   960  		_, ok := err.(storageDriver.PathNotFoundError)
   961  		if ok {
   962  			return false, -1, errors.ErrBlobNotFound
   963  		}
   964  
   965  		is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
   966  
   967  		return false, -1, err
   968  	}
   969  
   970  	is.log.Debug().Str("blob path", blobPath).Msg("blob path found")
   971  
   972  	return true, blobInfo.Size(), nil
   973  }
   974  
   975  // GetBlob returns a stream to read the blob.
   976  // FIXME: we should probably parse the manifest and use (digest, mediaType) as a
   977  // blob selector instead of directly downloading the blob.
   978  func (is *ObjectStorage) GetBlob(repo string, digest string, mediaType string) (io.Reader, int64, error) {
   979  	d, err := godigest.Parse(digest)
   980  	if err != nil {
   981  		is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
   982  		return nil, -1, errors.ErrBadBlobDigest
   983  	}
   984  
   985  	blobPath := is.BlobPath(repo, d)
   986  
   987  	is.RLock()
   988  	defer is.RUnlock()
   989  
   990  	blobInfo, err := is.store.Stat(context.Background(), blobPath)
   991  	if err != nil {
   992  		is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
   993  		return nil, -1, errors.ErrBlobNotFound
   994  	}
   995  
   996  	blobReader, err := is.store.Reader(context.Background(), blobPath, 0)
   997  	if err != nil {
   998  		is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to open blob")
   999  		return nil, -1, err
  1000  	}
  1001  
  1002  	return blobReader, blobInfo.Size(), nil
  1003  }
  1004  
  1005  func (is *ObjectStorage) GetBlobContent(repo string, digest string) ([]byte, error) {
  1006  	blob, _, err := is.GetBlob(repo, digest, ispec.MediaTypeImageManifest)
  1007  	if err != nil {
  1008  		return []byte{}, err
  1009  	}
  1010  
  1011  	buf := new(bytes.Buffer)
  1012  
  1013  	_, err = buf.ReadFrom(blob)
  1014  	if err != nil {
  1015  		is.log.Error().Err(err).Msg("failed to read blob")
  1016  		return []byte{}, err
  1017  	}
  1018  
  1019  	return buf.Bytes(), nil
  1020  }
  1021  
  1022  func (is *ObjectStorage) GetReferrers(repo, digest string, mediaType string) ([]notation.Descriptor, error) {
  1023  	return nil, errors.ErrMethodNotSupported
  1024  }
  1025  
  1026  func (is *ObjectStorage) GetIndexContent(repo string) ([]byte, error) {
  1027  	dir := path.Join(is.rootDir, repo)
  1028  
  1029  	buf, err := is.store.GetContent(context.Background(), path.Join(dir, "index.json"))
  1030  	if err != nil {
  1031  		is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
  1032  		return []byte{}, errors.ErrRepoNotFound
  1033  	}
  1034  
  1035  	return buf, nil
  1036  }
  1037  
  1038  // DeleteBlob removes the blob from the repository.
  1039  func (is *ObjectStorage) DeleteBlob(repo string, digest string) error {
  1040  	d, err := godigest.Parse(digest)
  1041  	if err != nil {
  1042  		is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
  1043  		return errors.ErrBlobNotFound
  1044  	}
  1045  
  1046  	blobPath := is.BlobPath(repo, d)
  1047  
  1048  	is.Lock()
  1049  	defer is.Unlock()
  1050  
  1051  	_, err = is.store.Stat(context.Background(), blobPath)
  1052  	if err != nil {
  1053  		is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
  1054  		return errors.ErrBlobNotFound
  1055  	}
  1056  
  1057  	if err := is.store.Delete(context.Background(), blobPath); err != nil {
  1058  		is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path")
  1059  		return err
  1060  	}
  1061  
  1062  	return nil
  1063  }
  1064  
   1065  // Do not use writeFile for multipart uploads; buf must not be empty.
   1066  // If you want to create an empty file, use is.store.PutContent().
  1067  func writeFile(store storageDriver.StorageDriver, filepath string, buf []byte) (int, error) {
  1068  	var n int
  1069  
  1070  	if fw, err := store.Writer(context.Background(), filepath, false); err == nil {
  1071  		defer fw.Close()
  1072  
  1073  		if n, err = fw.Write(buf); err != nil {
  1074  			return -1, err
  1075  		}
  1076  
  1077  		if err := fw.Commit(); err != nil {
  1078  			return -1, err
  1079  		}
  1080  	} else {
  1081  		return -1, err
  1082  	}
  1083  
  1084  	return n, nil
  1085  }
  1086  
   1087  // Because we cannot create an empty multipart upload, we track multipart uploads
   1088  // so that we know whether to create a FileWriter with append=true or append=false.
   1089  // Simply trying one mode and handling the error results in confusing S3 API errors.
  1090  func getMultipartFileWriter(is *ObjectStorage, filepath string) (storageDriver.FileWriter, error) {
  1091  	var file storageDriver.FileWriter
  1092  
  1093  	var err error
  1094  
  1095  	isMultiPartStarted, ok := is.isMultiPartUpload[filepath]
  1096  	if !isMultiPartStarted || !ok {
  1097  		file, err = is.store.Writer(context.Background(), filepath, false)
  1098  		if err != nil {
  1099  			return file, err
  1100  		}
  1101  	} else {
  1102  		file, err = is.store.Writer(context.Background(), filepath, true)
  1103  		if err != nil {
  1104  			return file, err
  1105  		}
  1106  	}
  1107  
  1108  	return file, nil
  1109  }