zotregistry.dev/zot@v1.4.4-0.20240314164342-eec277e14d20/pkg/storage/imagestore/imagestore.go

     1  package imagestore
     2  
     3  import (
     4  	"context"
     5  	"crypto/sha256"
     6  	"encoding/json"
     7  	"errors"
     8  	"fmt"
     9  	"io"
    10  	"path"
    11  	"path/filepath"
    12  	"strings"
    13  	"sync"
    14  	"time"
    15  	"unicode/utf8"
    16  
    17  	"github.com/docker/distribution/registry/storage/driver"
    18  	guuid "github.com/gofrs/uuid"
    19  	godigest "github.com/opencontainers/go-digest"
    20  	ispec "github.com/opencontainers/image-spec/specs-go/v1"
    21  
    22  	zerr "zotregistry.dev/zot/errors"
    23  	zcommon "zotregistry.dev/zot/pkg/common"
    24  	"zotregistry.dev/zot/pkg/extensions/monitoring"
    25  	syncConstants "zotregistry.dev/zot/pkg/extensions/sync/constants"
    26  	zlog "zotregistry.dev/zot/pkg/log"
    27  	zreg "zotregistry.dev/zot/pkg/regexp"
    28  	"zotregistry.dev/zot/pkg/scheduler"
    29  	"zotregistry.dev/zot/pkg/storage/cache"
    30  	common "zotregistry.dev/zot/pkg/storage/common"
    31  	storageConstants "zotregistry.dev/zot/pkg/storage/constants"
    32  	storageTypes "zotregistry.dev/zot/pkg/storage/types"
    33  	"zotregistry.dev/zot/pkg/test/inject"
    34  )
    35  
    36  const (
    37  	cosignSignatureTagSuffix = "sig"
    38  	SBOMTagSuffix            = "sbom"
    39  )
    40  
    41  // ImageStore provides the image storage operations.
    42  type ImageStore struct {
    43  	rootDir     string
    44  	storeDriver storageTypes.Driver
    45  	lock        *sync.RWMutex
    46  	log         zlog.Logger
    47  	metrics     monitoring.MetricServer
    48  	cache       cache.Cache
    49  	dedupe      bool
    50  	linter      common.Lint
    51  	commit      bool
    52  }
    53  
    54  func (is *ImageStore) Name() string {
    55  	return is.storeDriver.Name()
    56  }
    57  
    58  func (is *ImageStore) RootDir() string {
    59  	return is.rootDir
    60  }
    61  
    62  func (is *ImageStore) DirExists(d string) bool {
    63  	return is.storeDriver.DirExists(d)
    64  }
    65  
     66  // NewImageStore returns a new image store backed by the given storage driver (local filesystem or cloud).
     67  // See https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
    68  // Use the last argument to properly set a cache database, or it will default to boltDB local storage.
    69  func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlog.Logger,
    70  	metrics monitoring.MetricServer, linter common.Lint, storeDriver storageTypes.Driver, cacheDriver cache.Cache,
    71  ) storageTypes.ImageStore {
    72  	if err := storeDriver.EnsureDir(rootDir); err != nil {
    73  		log.Error().Err(err).Str("rootDir", rootDir).Msg("failed to create root dir")
    74  
    75  		return nil
    76  	}
    77  
    78  	imgStore := &ImageStore{
    79  		rootDir:     rootDir,
    80  		storeDriver: storeDriver,
    81  		lock:        &sync.RWMutex{},
    82  		log:         log,
    83  		metrics:     metrics,
    84  		dedupe:      dedupe,
    85  		linter:      linter,
    86  		commit:      commit,
    87  		cache:       cacheDriver,
    88  	}
    89  
    90  	return imgStore
    91  }
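
// Illustrative wiring sketch: the driver, cache, logger and metrics values below are
// assumed to come from zot's storage/logging packages (their constructors are not shown);
// only NewImageStore's signature is taken from this file.
//
//	var (
//		driver      storageTypes.Driver     // e.g. a local-filesystem or s3 driver
//		cacheDriver cache.Cache             // dedupe cache; may be left unset when dedupe is disabled
//		logger      zlog.Logger
//		metrics     monitoring.MetricServer
//	)
//
//	imgStore := NewImageStore("/var/lib/registry", "/var/lib/registry",
//		true /* dedupe */, false /* commit */, logger, metrics, nil /* linter */,
//		driver, cacheDriver)
//	if imgStore == nil {
//		// the root directory could not be created
//	}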
    92  
    93  // RLock read-lock.
    94  func (is *ImageStore) RLock(lockStart *time.Time) {
    95  	*lockStart = time.Now()
    96  
    97  	is.lock.RLock()
    98  }
    99  
   100  // RUnlock read-unlock.
   101  func (is *ImageStore) RUnlock(lockStart *time.Time) {
   102  	is.lock.RUnlock()
   103  
   104  	lockEnd := time.Now()
   105  	// includes time spent in acquiring and holding a lock
   106  	latency := lockEnd.Sub(*lockStart)
   107  	monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storageConstants.RLOCK) // histogram
   108  }
   109  
   110  // Lock write-lock.
   111  func (is *ImageStore) Lock(lockStart *time.Time) {
   112  	*lockStart = time.Now()
   113  
   114  	is.lock.Lock()
   115  }
   116  
   117  // Unlock write-unlock.
   118  func (is *ImageStore) Unlock(lockStart *time.Time) {
   119  	is.lock.Unlock()
   120  
   121  	lockEnd := time.Now()
   122  	// includes time spent in acquiring and holding a lock
   123  	latency := lockEnd.Sub(*lockStart)
   124  	monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storageConstants.RWLOCK) // histogram
   125  }
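
// Locking pattern used throughout this file: callers pass a time.Time by pointer so the
// unlock helpers can observe how long the lock was waited for and held, and export it via
// the storage lock latency histogram. A typical caller looks like:
//
//	var lockLatency time.Time
//
//	is.RLock(&lockLatency)
//	defer is.RUnlock(&lockLatency)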
   126  
   127  func (is *ImageStore) initRepo(name string) error {
   128  	repoDir := path.Join(is.rootDir, name)
   129  
   130  	if !utf8.ValidString(name) {
   131  		is.log.Error().Msg("invalid UTF-8 input")
   132  
   133  		return zerr.ErrInvalidRepositoryName
   134  	}
   135  
   136  	if !zreg.FullNameRegexp.MatchString(name) {
   137  		is.log.Error().Str("repository", name).Msg("invalid repository name")
   138  
   139  		return zerr.ErrInvalidRepositoryName
   140  	}
   141  
   142  	// create "blobs" subdir
   143  	err := is.storeDriver.EnsureDir(path.Join(repoDir, "blobs"))
   144  	if err != nil {
   145  		is.log.Error().Err(err).Str("repository", name).Str("dir", repoDir).Msg("failed to create blobs subdir")
   146  
   147  		return err
   148  	}
   149  	// create BlobUploadDir subdir
   150  	err = is.storeDriver.EnsureDir(path.Join(repoDir, storageConstants.BlobUploadDir))
   151  	if err != nil {
   152  		is.log.Error().Err(err).Msg("failed to create blob upload subdir")
   153  
   154  		return err
   155  	}
   156  
   157  	// "oci-layout" file - create if it doesn't exist
   158  	ilPath := path.Join(repoDir, ispec.ImageLayoutFile)
   159  	if _, err := is.storeDriver.Stat(ilPath); err != nil {
   160  		il := ispec.ImageLayout{Version: ispec.ImageLayoutVersion}
   161  
   162  		buf, err := json.Marshal(il)
   163  		if err != nil {
   164  			is.log.Error().Err(err).Msg("failed to marshal JSON")
   165  
   166  			return err
   167  		}
   168  
   169  		if _, err := is.storeDriver.WriteFile(ilPath, buf); err != nil {
   170  			is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file")
   171  
   172  			return err
   173  		}
   174  	}
   175  
   176  	// "index.json" file - create if it doesn't exist
   177  	indexPath := path.Join(repoDir, "index.json")
   178  	if _, err := is.storeDriver.Stat(indexPath); err != nil {
   179  		index := ispec.Index{}
   180  		index.SchemaVersion = 2
   181  
   182  		buf, err := json.Marshal(index)
   183  		if err != nil {
   184  			is.log.Error().Err(err).Msg("failed to marshal JSON")
   185  
   186  			return err
   187  		}
   188  
   189  		if _, err := is.storeDriver.WriteFile(indexPath, buf); err != nil {
    190  			is.log.Error().Err(err).Str("file", indexPath).Msg("failed to write file")
   191  
   192  			return err
   193  		}
   194  	}
   195  
   196  	return nil
   197  }
   198  
   199  // InitRepo creates an image repository under this store.
   200  func (is *ImageStore) InitRepo(name string) error {
   201  	var lockLatency time.Time
   202  
   203  	is.Lock(&lockLatency)
   204  	defer is.Unlock(&lockLatency)
   205  
   206  	return is.initRepo(name)
   207  }
   208  
    209  // ValidateRepo validates that the repository layout is compliant with the OCI image layout.
   210  func (is *ImageStore) ValidateRepo(name string) (bool, error) {
   211  	if !zreg.FullNameRegexp.MatchString(name) {
   212  		return false, zerr.ErrInvalidRepositoryName
   213  	}
   214  
    215  	// https://github.com/opencontainers/image-spec/blob/master/image-layout.md#content
    216  	// expect at least 3 entries - ["blobs", "oci-layout", "index.json"]
    217  	// plus an additional/optional BlobUploadDir in each image store
    218  	// for s3 we cannot create empty dirs, so we check only against index.json and oci-layout
   219  	dir := path.Join(is.rootDir, name)
   220  	if fi, err := is.storeDriver.Stat(dir); err != nil || !fi.IsDir() {
   221  		return false, zerr.ErrRepoNotFound
   222  	}
   223  
   224  	files, err := is.storeDriver.List(dir)
   225  	if err != nil {
   226  		is.log.Error().Err(err).Str("dir", dir).Msg("failed to read directory")
   227  
   228  		return false, zerr.ErrRepoNotFound
   229  	}
   230  
   231  	//nolint:gomnd
   232  	if len(files) < 2 {
   233  		return false, zerr.ErrRepoBadVersion
   234  	}
   235  
   236  	found := map[string]bool{
   237  		ispec.ImageLayoutFile: false,
   238  		"index.json":          false,
   239  	}
   240  
   241  	for _, file := range files {
   242  		fileInfo, err := is.storeDriver.Stat(file)
   243  		if err != nil {
   244  			return false, err
   245  		}
   246  
   247  		filename, err := filepath.Rel(dir, file)
   248  		if err != nil {
   249  			return false, err
   250  		}
   251  
   252  		if filename == "blobs" && !fileInfo.IsDir() {
   253  			return false, nil
   254  		}
   255  
   256  		found[filename] = true
   257  	}
   258  
    259  	// check that the blobs dir exists only for filesystem storage; in s3 we can't have empty dirs
   260  	if is.storeDriver.Name() == storageConstants.LocalStorageDriverName {
   261  		if !is.storeDriver.DirExists(path.Join(dir, "blobs")) {
   262  			return false, nil
   263  		}
   264  	}
   265  
   266  	for k, v := range found {
   267  		if !v && k != storageConstants.BlobUploadDir {
   268  			return false, nil
   269  		}
   270  	}
   271  
   272  	buf, err := is.storeDriver.ReadFile(path.Join(dir, ispec.ImageLayoutFile))
   273  	if err != nil {
   274  		return false, err
   275  	}
   276  
   277  	var il ispec.ImageLayout
   278  	if err := json.Unmarshal(buf, &il); err != nil {
   279  		return false, err
   280  	}
   281  
   282  	if il.Version != ispec.ImageLayoutVersion {
   283  		return false, zerr.ErrRepoBadVersion
   284  	}
   285  
   286  	return true, nil
   287  }
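
// For reference, a sketch of the on-disk layout ValidateRepo expects for a repository
// named "alpine" under rootDir (BlobUploadDir is the ".uploads" directory referenced
// elsewhere in this file):
//
//	<rootDir>/alpine/
//		blobs/        required on local storage; may be absent on s3 (no empty dirs)
//		oci-layout    version must match ispec.ImageLayoutVersion
//		index.json    ispec.Index with SchemaVersion 2
//		.uploads/     optional, holds in-progress blob uploads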
   288  
   289  // GetRepositories returns a list of all the repositories under this store.
   290  func (is *ImageStore) GetRepositories() ([]string, error) {
   291  	var lockLatency time.Time
   292  
   293  	dir := is.rootDir
   294  
   295  	is.RLock(&lockLatency)
   296  	defer is.RUnlock(&lockLatency)
   297  
   298  	stores := make([]string, 0)
   299  
   300  	err := is.storeDriver.Walk(dir, func(fileInfo driver.FileInfo) error {
   301  		if !fileInfo.IsDir() {
   302  			return nil
   303  		}
   304  
    305  		// skip .sync and .uploads dirs, no need to try to validate them
   306  		if strings.HasSuffix(fileInfo.Path(), syncConstants.SyncBlobUploadDir) ||
   307  			strings.HasSuffix(fileInfo.Path(), storageConstants.BlobUploadDir) {
   308  			return driver.ErrSkipDir
   309  		}
   310  
   311  		rel, err := filepath.Rel(is.rootDir, fileInfo.Path())
   312  		if err != nil {
   313  			return nil //nolint:nilerr // ignore paths that are not under root dir
   314  		}
   315  
   316  		if ok, err := is.ValidateRepo(rel); !ok || err != nil {
   317  			return nil //nolint:nilerr // ignore invalid repos
   318  		}
   319  
   320  		stores = append(stores, rel)
   321  
   322  		return nil
   323  	})
   324  
   325  	// if the root directory is not yet created then return an empty slice of repositories
   326  	var perr driver.PathNotFoundError
   327  	if errors.As(err, &perr) {
   328  		return stores, nil
   329  	}
   330  
   331  	return stores, err
   332  }
   333  
    334  // GetNextRepository returns the next repository under this store.
   335  func (is *ImageStore) GetNextRepository(repo string) (string, error) {
   336  	var lockLatency time.Time
   337  
   338  	dir := is.rootDir
   339  
   340  	is.RLock(&lockLatency)
   341  	defer is.RUnlock(&lockLatency)
   342  
   343  	_, err := is.storeDriver.List(dir)
   344  	if err != nil {
   345  		if errors.As(err, &driver.PathNotFoundError{}) {
   346  			is.log.Debug().Msg("empty rootDir")
   347  
   348  			return "", nil
   349  		}
   350  
   351  		is.log.Error().Err(err).Str("root-dir", dir).Msg("failed to walk storage root-dir")
   352  
   353  		return "", err
   354  	}
   355  
   356  	found := false
   357  	store := ""
   358  	err = is.storeDriver.Walk(dir, func(fileInfo driver.FileInfo) error {
   359  		if !fileInfo.IsDir() {
   360  			return nil
   361  		}
   362  
   363  		rel, err := filepath.Rel(is.rootDir, fileInfo.Path())
   364  		if err != nil {
   365  			return nil //nolint:nilerr // ignore paths not relative to root dir
   366  		}
   367  
   368  		ok, err := is.ValidateRepo(rel)
   369  		if !ok || err != nil {
   370  			return nil //nolint:nilerr // ignore invalid repos
   371  		}
   372  
   373  		if repo == "" && ok && err == nil {
   374  			store = rel
   375  
   376  			return io.EOF
   377  		}
   378  
   379  		if found {
   380  			store = rel
   381  
   382  			return io.EOF
   383  		}
   384  
   385  		if rel == repo {
   386  			found = true
   387  		}
   388  
   389  		return nil
   390  	})
   391  
   392  	driverErr := &driver.Error{}
   393  
    394  	// some s3 implementations (e.g. DigitalOcean Spaces) will return PathNotFoundError for Walk but not for List,
    395  	// therefore we must also catch that error here.
   396  	if errors.As(err, &driver.PathNotFoundError{}) {
   397  		is.log.Debug().Msg("empty rootDir")
   398  
   399  		return "", nil
   400  	}
   401  
   402  	if errors.Is(err, io.EOF) ||
   403  		(errors.As(err, driverErr) && errors.Is(driverErr.Enclosed, io.EOF)) {
   404  		return store, nil
   405  	}
   406  
   407  	return store, err
   408  }
   409  
   410  // GetImageTags returns a list of image tags available in the specified repository.
   411  func (is *ImageStore) GetImageTags(repo string) ([]string, error) {
   412  	var lockLatency time.Time
   413  
   414  	dir := path.Join(is.rootDir, repo)
   415  	if fi, err := is.storeDriver.Stat(dir); err != nil || !fi.IsDir() {
   416  		return nil, zerr.ErrRepoNotFound
   417  	}
   418  
   419  	is.RLock(&lockLatency)
   420  	defer is.RUnlock(&lockLatency)
   421  
   422  	index, err := common.GetIndex(is, repo, is.log)
   423  	if err != nil {
   424  		return nil, err
   425  	}
   426  
   427  	return common.GetTagsByIndex(index), nil
   428  }
   429  
    430  // GetImageManifest returns the image manifest of an image in the specified repository.
   431  func (is *ImageStore) GetImageManifest(repo, reference string) ([]byte, godigest.Digest, string, error) {
   432  	dir := path.Join(is.rootDir, repo)
   433  	if fi, err := is.storeDriver.Stat(dir); err != nil || !fi.IsDir() {
   434  		return nil, "", "", zerr.ErrRepoNotFound
   435  	}
   436  
   437  	var lockLatency time.Time
   438  
   439  	var err error
   440  
   441  	is.RLock(&lockLatency)
   442  	defer func() {
   443  		is.RUnlock(&lockLatency)
   444  
   445  		if err == nil {
   446  			monitoring.IncDownloadCounter(is.metrics, repo)
   447  		}
   448  	}()
   449  
   450  	index, err := common.GetIndex(is, repo, is.log)
   451  	if err != nil {
   452  		return nil, "", "", err
   453  	}
   454  
   455  	manifestDesc, found := common.GetManifestDescByReference(index, reference)
   456  	if !found {
   457  		return nil, "", "", zerr.ErrManifestNotFound
   458  	}
   459  
   460  	buf, err := is.GetBlobContent(repo, manifestDesc.Digest)
   461  	if err != nil {
   462  		if errors.Is(err, zerr.ErrBlobNotFound) {
   463  			return nil, "", "", zerr.ErrManifestNotFound
   464  		}
   465  
   466  		return nil, "", "", err
   467  	}
   468  
   469  	var manifest ispec.Manifest
   470  	if err := json.Unmarshal(buf, &manifest); err != nil {
   471  		is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
   472  
   473  		return nil, "", "", err
   474  	}
   475  
   476  	return buf, manifestDesc.Digest, manifestDesc.MediaType, nil
   477  }
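
// Usage sketch (repository and tag names are placeholders): fetch a manifest by tag or
// digest and decode it.
//
//	body, dgst, mediaType, err := imgStore.GetImageManifest("alpine", "latest")
//	if err == nil && mediaType == ispec.MediaTypeImageManifest {
//		var manifest ispec.Manifest
//		if err := json.Unmarshal(body, &manifest); err == nil {
//			_ = dgst // content digest of the manifest blob
//		}
//	}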
   478  
   479  // PutImageManifest adds an image manifest to the repository.
   480  func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //nolint: gocyclo
   481  	body []byte,
   482  ) (godigest.Digest, godigest.Digest, error) {
   483  	if err := is.InitRepo(repo); err != nil {
   484  		is.log.Debug().Err(err).Msg("init repo")
   485  
   486  		return "", "", err
   487  	}
   488  
   489  	var lockLatency time.Time
   490  
   491  	var err error
   492  
   493  	is.Lock(&lockLatency)
   494  	defer func() {
   495  		is.Unlock(&lockLatency)
   496  
   497  		if err == nil {
   498  			if is.storeDriver.Name() == storageConstants.LocalStorageDriverName {
   499  				monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
   500  			}
   501  
   502  			monitoring.IncUploadCounter(is.metrics, repo)
   503  		}
   504  	}()
   505  
   506  	refIsDigest := true
   507  
   508  	mDigest, err := common.GetAndValidateRequestDigest(body, reference, is.log)
   509  	if err != nil {
   510  		if errors.Is(err, zerr.ErrBadManifest) {
   511  			return mDigest, "", err
   512  		}
   513  
   514  		refIsDigest = false
   515  	}
   516  
   517  	dig, err := common.ValidateManifest(is, repo, reference, mediaType, body, is.log)
   518  	if err != nil {
   519  		return dig, "", err
   520  	}
   521  
   522  	index, err := common.GetIndex(is, repo, is.log)
   523  	if err != nil {
   524  		return "", "", err
   525  	}
   526  
   527  	// create a new descriptor
   528  	desc := ispec.Descriptor{
   529  		MediaType: mediaType, Size: int64(len(body)), Digest: mDigest,
   530  	}
   531  
   532  	if !refIsDigest {
   533  		desc.Annotations = map[string]string{ispec.AnnotationRefName: reference}
   534  	}
   535  
   536  	var subjectDigest godigest.Digest
   537  
   538  	artifactType := ""
   539  
   540  	if mediaType == ispec.MediaTypeImageManifest {
   541  		var manifest ispec.Manifest
   542  
   543  		err := json.Unmarshal(body, &manifest)
   544  		if err != nil {
   545  			return "", "", err
   546  		}
   547  
   548  		if manifest.Subject != nil {
   549  			subjectDigest = manifest.Subject.Digest
   550  		}
   551  
   552  		artifactType = zcommon.GetManifestArtifactType(manifest)
   553  	} else if mediaType == ispec.MediaTypeImageIndex {
   554  		var index ispec.Index
   555  
   556  		err := json.Unmarshal(body, &index)
   557  		if err != nil {
   558  			return "", "", err
   559  		}
   560  
   561  		if index.Subject != nil {
   562  			subjectDigest = index.Subject.Digest
   563  		}
   564  
   565  		artifactType = zcommon.GetIndexArtifactType(index)
   566  	}
   567  
   568  	updateIndex, oldDgst, err := common.CheckIfIndexNeedsUpdate(&index, &desc, is.log)
   569  	if err != nil {
   570  		return "", "", err
   571  	}
   572  
   573  	if !updateIndex {
   574  		return desc.Digest, subjectDigest, nil
   575  	}
   576  
   577  	// write manifest to "blobs"
   578  	dir := path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
   579  	manifestPath := path.Join(dir, mDigest.Encoded())
   580  
   581  	if _, err = is.storeDriver.WriteFile(manifestPath, body); err != nil {
   582  		is.log.Error().Err(err).Str("file", manifestPath).Msg("failed to write")
   583  
   584  		return "", "", err
   585  	}
   586  
   587  	err = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, desc, oldDgst, is.log)
   588  	if err != nil {
   589  		return "", "", err
   590  	}
   591  
   592  	// now update "index.json"
   593  	index.Manifests = append(index.Manifests, desc)
   594  
   595  	// update the descriptors artifact type in order to check for signatures when applying the linter
   596  	desc.ArtifactType = artifactType
   597  
   598  	// apply linter only on images, not signatures
   599  	pass, err := common.ApplyLinter(is, is.linter, repo, desc)
   600  	if !pass {
   601  		is.log.Error().Err(err).Str("repository", repo).Str("reference", reference).
   602  			Msg("linter didn't pass")
   603  
   604  		return "", "", err
   605  	}
   606  
   607  	if err := is.PutIndexContent(repo, index); err != nil {
   608  		return "", "", err
   609  	}
   610  
   611  	return desc.Digest, subjectDigest, nil
   612  }
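
// Sketch of the usual push sequence (an assumed client flow, not mandated by this file):
// blobs (config and layers) are uploaded before the manifest that references them, then
// the manifest is stored under a tag.
//
//	_, _, err := imgStore.FullBlobUpload("alpine", configReader, configDigest)
//	_, _, err = imgStore.FullBlobUpload("alpine", layerReader, layerDigest)
//	manifestDigest, subjectDigest, err := imgStore.PutImageManifest(
//		"alpine", "latest", ispec.MediaTypeImageManifest, manifestBody)
//
// subjectDigest is non-empty only when the manifest or index carries a subject field,
// i.e. when it is a referrer of another image.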
   613  
   614  // DeleteImageManifest deletes the image manifest from the repository.
   615  func (is *ImageStore) DeleteImageManifest(repo, reference string, detectCollisions bool) error {
   616  	dir := path.Join(is.rootDir, repo)
   617  	if fi, err := is.storeDriver.Stat(dir); err != nil || !fi.IsDir() {
   618  		return zerr.ErrRepoNotFound
   619  	}
   620  
   621  	var lockLatency time.Time
   622  
   623  	is.Lock(&lockLatency)
   624  	defer is.Unlock(&lockLatency)
   625  
   626  	err := is.deleteImageManifest(repo, reference, detectCollisions)
   627  	if err != nil {
   628  		return err
   629  	}
   630  
   631  	return nil
   632  }
   633  
   634  func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisions bool) error {
   635  	defer func() {
   636  		if is.storeDriver.Name() == storageConstants.LocalStorageDriverName {
   637  			monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
   638  		}
   639  	}()
   640  
   641  	index, err := common.GetIndex(is, repo, is.log)
   642  	if err != nil {
   643  		return err
   644  	}
   645  
   646  	manifestDesc, err := common.RemoveManifestDescByReference(&index, reference, detectCollisions)
   647  	if err != nil {
   648  		return err
   649  	}
   650  
    651  	/* check if the manifest is referenced in image indexes; do not allow manipulation of index images
    652  	(i.e. removing a manifest that is part of an image index) */
   653  	if manifestDesc.MediaType == ispec.MediaTypeImageManifest {
   654  		for _, mDesc := range index.Manifests {
   655  			if mDesc.MediaType == ispec.MediaTypeImageIndex {
   656  				if ok, _ := common.IsBlobReferencedInImageIndex(is, repo, manifestDesc.Digest, ispec.Index{
   657  					Manifests: []ispec.Descriptor{mDesc},
   658  				}, is.log); ok {
   659  					return zerr.ErrManifestReferenced
   660  				}
   661  			}
   662  		}
   663  	}
   664  
   665  	err = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)
   666  	if err != nil {
   667  		return err
   668  	}
   669  
   670  	// now update "index.json"
   671  	dir := path.Join(is.rootDir, repo)
   672  	file := path.Join(dir, "index.json")
   673  
   674  	buf, err := json.Marshal(index)
   675  	if err != nil {
   676  		return err
   677  	}
   678  
   679  	if _, err := is.storeDriver.WriteFile(file, buf); err != nil {
   680  		is.log.Debug().Str("reference", reference).Str("repository", repo).Msg("failed to update index.json")
   681  
   682  		return err
   683  	}
   684  
    685  	// Delete the manifest blob only when its digest is no longer referenced by any remaining manifest entry.
    686  	// e.g. 1.0.1 & 1.0.2 have the same blob digest, so if we delete 1.0.1 the blob should not be removed.
   687  	toDelete := true
   688  
   689  	for _, manifest := range index.Manifests {
   690  		if manifestDesc.Digest.String() == manifest.Digest.String() {
   691  			toDelete = false
   692  
   693  			break
   694  		}
   695  	}
   696  
   697  	if toDelete {
   698  		p := path.Join(dir, "blobs", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())
   699  
   700  		err = is.storeDriver.Delete(p)
   701  		if err != nil {
   702  			return err
   703  		}
   704  	}
   705  
   706  	return nil
   707  }
   708  
   709  // BlobUploadPath returns the upload path for a blob in this store.
   710  func (is *ImageStore) BlobUploadPath(repo, uuid string) string {
   711  	dir := path.Join(is.rootDir, repo)
   712  	blobUploadPath := path.Join(dir, storageConstants.BlobUploadDir, uuid)
   713  
   714  	return blobUploadPath
   715  }
   716  
    717  // NewBlobUpload creates a new blob upload and returns its unique ID.
   718  func (is *ImageStore) NewBlobUpload(repo string) (string, error) {
   719  	if err := is.InitRepo(repo); err != nil {
   720  		is.log.Error().Err(err).Msg("failed to initialize repo")
   721  
   722  		return "", err
   723  	}
   724  
   725  	uuid, err := guuid.NewV4()
   726  	if err != nil {
   727  		return "", err
   728  	}
   729  
   730  	uid := uuid.String()
   731  
   732  	blobUploadPath := is.BlobUploadPath(repo, uid)
   733  
   734  	// create multipart upload (append false)
   735  	writer, err := is.storeDriver.Writer(blobUploadPath, false)
   736  	if err != nil {
   737  		is.log.Debug().Err(err).Str("blob", blobUploadPath).Msg("failed to start multipart writer")
   738  
   739  		return "", zerr.ErrRepoNotFound
   740  	}
   741  
   742  	defer writer.Close()
   743  
   744  	return uid, nil
   745  }
   746  
   747  // GetBlobUpload returns the current size of a blob upload.
   748  func (is *ImageStore) GetBlobUpload(repo, uuid string) (int64, error) {
   749  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   750  
   751  	if !utf8.ValidString(blobUploadPath) {
   752  		is.log.Error().Msg("invalid UTF-8 input")
   753  
   754  		return -1, zerr.ErrInvalidRepositoryName
   755  	}
   756  
   757  	writer, err := is.storeDriver.Writer(blobUploadPath, true)
   758  	if err != nil {
   759  		if errors.As(err, &driver.PathNotFoundError{}) {
   760  			return -1, zerr.ErrUploadNotFound
   761  		}
   762  
   763  		return -1, err
   764  	}
   765  
   766  	defer writer.Close()
   767  
   768  	return writer.Size(), nil
   769  }
   770  
   771  // PutBlobChunkStreamed appends another chunk of data to the specified blob. It returns
    772  // the number of bytes actually written to the blob.
   773  func (is *ImageStore) PutBlobChunkStreamed(repo, uuid string, body io.Reader) (int64, error) {
   774  	if err := is.InitRepo(repo); err != nil {
   775  		return -1, err
   776  	}
   777  
   778  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   779  
   780  	file, err := is.storeDriver.Writer(blobUploadPath, true)
   781  	if err != nil {
   782  		if errors.As(err, &driver.PathNotFoundError{}) {
   783  			return -1, zerr.ErrUploadNotFound
   784  		}
   785  
   786  		is.log.Error().Err(err).Msg("failed to continue multipart upload")
   787  
   788  		return -1, err
   789  	}
   790  
   791  	var n int64 //nolint: varnamelen
   792  
   793  	defer func() {
   794  		err = file.Close()
   795  	}()
   796  
   797  	n, err = io.Copy(file, body)
   798  
   799  	return n, err
   800  }
   801  
   802  // PutBlobChunk writes another chunk of data to the specified blob. It returns
    803  // the number of bytes actually written to the blob.
   804  func (is *ImageStore) PutBlobChunk(repo, uuid string, from, to int64,
   805  	body io.Reader,
   806  ) (int64, error) {
   807  	if err := is.InitRepo(repo); err != nil {
   808  		return -1, err
   809  	}
   810  
   811  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   812  
   813  	file, err := is.storeDriver.Writer(blobUploadPath, true)
   814  	if err != nil {
   815  		if errors.As(err, &driver.PathNotFoundError{}) {
   816  			return -1, zerr.ErrUploadNotFound
   817  		}
   818  
   819  		is.log.Error().Err(err).Msg("failed to continue multipart upload")
   820  
   821  		return -1, err
   822  	}
   823  
   824  	defer file.Close()
   825  
   826  	if from != file.Size() {
   827  		is.log.Error().Int64("expected", from).Int64("actual", file.Size()).
   828  			Msg("invalid range start for blob upload")
   829  
   830  		return -1, zerr.ErrBadUploadRange
   831  	}
   832  
   833  	n, err := io.Copy(file, body)
   834  
   835  	return n, err
   836  }
   837  
    838  // BlobUploadInfo returns the current size in bytes of a blob upload.
   839  func (is *ImageStore) BlobUploadInfo(repo, uuid string) (int64, error) {
   840  	blobUploadPath := is.BlobUploadPath(repo, uuid)
   841  
   842  	writer, err := is.storeDriver.Writer(blobUploadPath, true)
   843  	if err != nil {
   844  		if errors.As(err, &driver.PathNotFoundError{}) {
   845  			return -1, zerr.ErrUploadNotFound
   846  		}
   847  
   848  		return -1, err
   849  	}
   850  
   851  	defer writer.Close()
   852  
   853  	return writer.Size(), nil
   854  }
   855  
    856  // FinishBlobUpload finalizes the blob upload and moves the blob into the repository.
   857  func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDigest godigest.Digest) error {
   858  	if err := dstDigest.Validate(); err != nil {
   859  		return err
   860  	}
   861  
   862  	src := is.BlobUploadPath(repo, uuid)
   863  
    864  	// complete the multipart upload
   865  	fileWriter, err := is.storeDriver.Writer(src, true)
   866  	if err != nil {
   867  		is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
   868  
   869  		return zerr.ErrUploadNotFound
   870  	}
   871  
   872  	if err := fileWriter.Commit(); err != nil {
   873  		is.log.Error().Err(err).Msg("failed to commit file")
   874  
   875  		return err
   876  	}
   877  
   878  	if err := fileWriter.Close(); err != nil {
   879  		is.log.Error().Err(err).Msg("failed to close file")
   880  
   881  		return err
   882  	}
   883  
   884  	srcDigest, err := getBlobDigest(is, src)
   885  	if err != nil {
   886  		is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
   887  
   888  		return err
   889  	}
   890  
   891  	if srcDigest != dstDigest {
   892  		is.log.Error().Str("srcDigest", srcDigest.String()).
   893  			Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest")
   894  
   895  		return zerr.ErrBadBlobDigest
   896  	}
   897  
   898  	dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String())
   899  
   900  	err = is.storeDriver.EnsureDir(dir)
   901  	if err != nil {
   902  		is.log.Error().Err(err).Str("dir", dir).Msg("failed to create dir")
   903  
   904  		return err
   905  	}
   906  
   907  	dst := is.BlobPath(repo, dstDigest)
   908  
   909  	var lockLatency time.Time
   910  
   911  	is.Lock(&lockLatency)
   912  	defer is.Unlock(&lockLatency)
   913  
   914  	if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
   915  		err = is.DedupeBlob(src, dstDigest, repo, dst)
   916  		if err := inject.Error(err); err != nil {
   917  			is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
   918  				Str("dst", dst).Msg("failed to dedupe blob")
   919  
   920  			return err
   921  		}
   922  	} else {
   923  		if err := is.storeDriver.Move(src, dst); err != nil {
   924  			is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
   925  				Str("dst", dst).Msg("failed to finish blob")
   926  
   927  			return err
   928  		}
   929  	}
   930  
   931  	return nil
   932  }
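
// Sketch of a chunked (multipart) upload session built from the methods above; part1,
// part2 and blobDigest are placeholders supplied by the caller.
//
//	uid, err := imgStore.NewBlobUpload("alpine")
//	n1, err := imgStore.PutBlobChunk("alpine", uid, 0, int64(len(part1))-1, bytes.NewReader(part1))
//	n2, err := imgStore.PutBlobChunk("alpine", uid, n1, n1+int64(len(part2))-1, bytes.NewReader(part2))
//	err = imgStore.FinishBlobUpload("alpine", uid, nil, blobDigest)
//	_ = n2
//
// FinishBlobUpload computes the digest from the already-written upload file, so the body
// argument is not read by this implementation (callers may still pass the original reader).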
   933  
   934  // FullBlobUpload handles a full blob upload, and no partial session is created.
   935  func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godigest.Digest) (string, int64, error) {
   936  	if err := dstDigest.Validate(); err != nil {
   937  		return "", -1, err
   938  	}
   939  
   940  	if err := is.InitRepo(repo); err != nil {
   941  		return "", -1, err
   942  	}
   943  
   944  	u, err := guuid.NewV4()
   945  	if err != nil {
   946  		return "", -1, err
   947  	}
   948  
   949  	uuid := u.String()
   950  	src := is.BlobUploadPath(repo, uuid)
   951  	digester := sha256.New()
   952  
   953  	blobFile, err := is.storeDriver.Writer(src, false)
   954  	if err != nil {
   955  		is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
   956  
   957  		return "", -1, zerr.ErrUploadNotFound
   958  	}
   959  
   960  	defer blobFile.Close()
   961  
   962  	mw := io.MultiWriter(blobFile, digester)
   963  
   964  	nbytes, err := io.Copy(mw, body)
   965  	if err != nil {
   966  		return "", -1, err
   967  	}
   968  
   969  	if err := blobFile.Commit(); err != nil {
   970  		is.log.Error().Err(err).Str("blob", src).Msg("failed to commit blob")
   971  
   972  		return "", -1, err
   973  	}
   974  
   975  	srcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf("%x", digester.Sum(nil)))
   976  	if srcDigest != dstDigest {
   977  		is.log.Error().Str("srcDigest", srcDigest.String()).
   978  			Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest")
   979  
   980  		return "", -1, zerr.ErrBadBlobDigest
   981  	}
   982  
   983  	dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String())
   984  	_ = is.storeDriver.EnsureDir(dir)
   985  
   986  	var lockLatency time.Time
   987  
   988  	is.Lock(&lockLatency)
   989  	defer is.Unlock(&lockLatency)
   990  
   991  	dst := is.BlobPath(repo, dstDigest)
   992  
   993  	if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
   994  		if err := is.DedupeBlob(src, dstDigest, repo, dst); err != nil {
   995  			is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
   996  				Str("dst", dst).Msg("failed to dedupe blob")
   997  
   998  			return "", -1, err
   999  		}
  1000  	} else {
  1001  		if err := is.storeDriver.Move(src, dst); err != nil {
  1002  			is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
  1003  				Str("dst", dst).Msg("failed to finish blob")
  1004  
  1005  			return "", -1, err
  1006  		}
  1007  	}
  1008  
  1009  	return uuid, nbytes, nil
  1010  }
  1011  
  1012  func (is *ImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dstRepo string, dst string) error {
  1013  retry:
  1014  	is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe begin")
  1015  
  1016  	dstRecord, err := is.cache.GetBlob(dstDigest)
  1017  	if err := inject.Error(err); err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
  1018  		is.log.Error().Err(err).Str("blobPath", dst).Str("component", "dedupe").Msg("failed to lookup blob record")
  1019  
  1020  		return err
  1021  	}
  1022  
  1023  	if dstRecord == "" {
   1024  		// cache record doesn't exist, so this is the first disk and cache entry for this digest
  1025  		if err := is.cache.PutBlob(dstDigest, dst); err != nil {
  1026  			is.log.Error().Err(err).Str("blobPath", dst).Str("component", "dedupe").
  1027  				Msg("failed to insert blob record")
  1028  
  1029  			return err
  1030  		}
  1031  
  1032  		// move the blob from uploads to final dest
  1033  		if err := is.storeDriver.Move(src, dst); err != nil {
  1034  			is.log.Error().Err(err).Str("src", src).Str("dst", dst).Str("component", "dedupe").
  1035  				Msg("failed to rename blob")
  1036  
  1037  			return err
  1038  		}
  1039  
  1040  		is.log.Debug().Str("src", src).Str("dst", dst).Str("component", "dedupe").Msg("rename")
  1041  	} else {
  1042  		// cache record exists, but due to GC and upgrades from older versions,
  1043  		// disk content and cache records may go out of sync
  1044  		if is.cache.UsesRelativePaths() {
  1045  			dstRecord = path.Join(is.rootDir, dstRecord)
  1046  		}
  1047  
  1048  		blobInfo, err := is.storeDriver.Stat(dstRecord)
  1049  		if err != nil {
  1050  			is.log.Error().Err(err).Str("blobPath", dstRecord).Str("component", "dedupe").Msg("failed to stat")
  1051  			// the actual blob on disk may have been removed by GC, so sync the cache
  1052  			err := is.cache.DeleteBlob(dstDigest, dstRecord)
  1053  			if err = inject.Error(err); err != nil {
  1054  				//nolint:lll
  1055  				is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).
  1056  					Str("component", "dedupe").Msg("failed to delete blob record")
  1057  
  1058  				return err
  1059  			}
  1060  
  1061  			goto retry
  1062  		}
  1063  
   1064  		// prevent overwriting the original blob
  1065  		if !is.storeDriver.SameFile(dst, dstRecord) {
  1066  			if err := is.storeDriver.Link(dstRecord, dst); err != nil {
  1067  				is.log.Error().Err(err).Str("blobPath", dstRecord).Str("component", "dedupe").
  1068  					Msg("failed to link blobs")
  1069  
  1070  				return err
  1071  			}
  1072  
  1073  			if err := is.cache.PutBlob(dstDigest, dst); err != nil {
  1074  				is.log.Error().Err(err).Str("blobPath", dst).Str("component", "dedupe").
  1075  					Msg("failed to insert blob record")
  1076  
  1077  				return err
  1078  			}
  1079  		} else {
   1080  			// if it's the same file then it was already uploaded; check if the blob is corrupted
  1081  			if desc, err := common.GetBlobDescriptorFromRepo(is, dstRepo, dstDigest, is.log); err == nil {
  1082  				// blob corrupted, replace content
  1083  				if desc.Size != blobInfo.Size() {
  1084  					if err := is.storeDriver.Move(src, dst); err != nil {
  1085  						is.log.Error().Err(err).Str("src", src).Str("dst", dst).Str("component", "dedupe").
  1086  							Msg("failed to rename blob")
  1087  
  1088  						return err
  1089  					}
  1090  
  1091  					is.log.Debug().Str("src", src).Str("component", "dedupe").Msg("remove")
  1092  
  1093  					return nil
  1094  				}
  1095  			}
  1096  		}
  1097  
  1098  		// remove temp blobupload
  1099  		if err := is.storeDriver.Delete(src); err != nil {
  1100  			is.log.Error().Err(err).Str("src", src).Str("component", "dedupe").
  1101  				Msg("failed to remove blob")
  1102  
  1103  			return err
  1104  		}
  1105  
  1106  		is.log.Debug().Str("src", src).Str("component", "dedupe").Msg("remove")
  1107  	}
  1108  
  1109  	return nil
  1110  }
  1111  
  1112  // DeleteBlobUpload deletes an existing blob upload that is currently in progress.
  1113  func (is *ImageStore) DeleteBlobUpload(repo, uuid string) error {
  1114  	blobUploadPath := is.BlobUploadPath(repo, uuid)
  1115  
  1116  	writer, err := is.storeDriver.Writer(blobUploadPath, true)
  1117  	if err != nil {
  1118  		if errors.As(err, &driver.PathNotFoundError{}) {
  1119  			return zerr.ErrUploadNotFound
  1120  		}
  1121  
  1122  		return err
  1123  	}
  1124  
  1125  	defer writer.Close()
  1126  
  1127  	if err := writer.Cancel(); err != nil {
  1128  		is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("failed to delete blob upload")
  1129  
  1130  		return err
  1131  	}
  1132  
  1133  	return nil
  1134  }
  1135  
  1136  // BlobPath returns the repository path of a blob.
  1137  func (is *ImageStore) BlobPath(repo string, digest godigest.Digest) string {
  1138  	return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded())
  1139  }
  1140  
   1141  /*
   1142  CheckBlob verifies a blob and returns true if the blob is correct.
   1143  
   1144  If the blob is not found on disk but is found in the cache, it will be copied (hard-linked) over.
   1145  */
  1146  func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int64, error) {
  1147  	var lockLatency time.Time
  1148  
  1149  	if err := digest.Validate(); err != nil {
  1150  		return false, -1, err
  1151  	}
  1152  
  1153  	blobPath := is.BlobPath(repo, digest)
  1154  
  1155  	if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
  1156  		is.Lock(&lockLatency)
  1157  		defer is.Unlock(&lockLatency)
  1158  	} else {
  1159  		is.RLock(&lockLatency)
  1160  		defer is.RUnlock(&lockLatency)
  1161  	}
  1162  
  1163  	binfo, err := is.storeDriver.Stat(blobPath)
  1164  	if err == nil && binfo.Size() > 0 {
   1165  		// try to find the blob size in the repo's blob descriptors
   1166  		desc, err := common.GetBlobDescriptorFromRepo(is, repo, digest, is.log)
   1167  		if err != nil || desc.Size == binfo.Size() {
   1168  			// blob not found in descriptors (so we cannot compare sizes) or sizes match, just return
  1169  			is.log.Debug().Str("blob path", blobPath).Msg("blob path found")
  1170  
  1171  			return true, binfo.Size(), nil //nolint: nilerr
  1172  		}
  1173  
  1174  		if desc.Size != binfo.Size() {
  1175  			is.log.Debug().Str("blob path", blobPath).Msg("blob path found, but it's corrupted")
  1176  
  1177  			return false, -1, zerr.ErrBlobNotFound
  1178  		}
  1179  	}
   1180  	// otherwise it is a 'deduped' blob (empty file)
  1181  
  1182  	// Check blobs in cache
  1183  	dstRecord, err := is.checkCacheBlob(digest)
  1184  	if err != nil {
  1185  		is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache")
  1186  
  1187  		return false, -1, zerr.ErrBlobNotFound
  1188  	}
  1189  
  1190  	blobSize, err := is.copyBlob(repo, blobPath, dstRecord)
  1191  	if err != nil {
  1192  		return false, -1, zerr.ErrBlobNotFound
  1193  	}
  1194  
  1195  	// put deduped blob in cache
  1196  	if err := is.cache.PutBlob(digest, blobPath); err != nil {
  1197  		is.log.Error().Err(err).Str("blobPath", blobPath).Str("component", "dedupe").Msg("failed to insert blob record")
  1198  
  1199  		return false, -1, err
  1200  	}
  1201  
  1202  	return true, blobSize, nil
  1203  }
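
// Usage sketch: CheckBlob is roughly what a registry-level blob existence check maps to
// (an assumption about the calling layer, which lives outside this file). When the blob
// exists only as a deduped (empty) file, it is restored from the cache record first.
//
//	ok, size, err := imgStore.CheckBlob("alpine", layerDigest)
//	if err != nil || !ok {
//		// the blob is missing and must be uploaded
//	}
//	_ = size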
  1204  
  1205  // StatBlob verifies if a blob is present inside a repository. The caller function MUST lock from outside.
  1206  func (is *ImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64, time.Time, error) {
  1207  	if err := digest.Validate(); err != nil {
  1208  		return false, -1, time.Time{}, err
  1209  	}
  1210  
  1211  	binfo, err := is.originalBlobInfo(repo, digest)
  1212  	if err != nil {
  1213  		return false, -1, time.Time{}, err
  1214  	}
  1215  
  1216  	return true, binfo.Size(), binfo.ModTime(), nil
  1217  }
  1218  
  1219  func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) {
  1220  	if err := digest.Validate(); err != nil {
  1221  		return "", err
  1222  	}
  1223  
  1224  	if fmt.Sprintf("%v", is.cache) == fmt.Sprintf("%v", nil) {
  1225  		return "", zerr.ErrBlobNotFound
  1226  	}
  1227  
  1228  	dstRecord, err := is.cache.GetBlob(digest)
  1229  	if err != nil {
  1230  		return "", err
  1231  	}
  1232  
  1233  	if is.cache.UsesRelativePaths() {
  1234  		dstRecord = path.Join(is.rootDir, dstRecord)
  1235  	}
  1236  
  1237  	if _, err := is.storeDriver.Stat(dstRecord); err != nil {
  1238  		is.log.Error().Err(err).Str("blob", dstRecord).Msg("failed to stat blob")
  1239  
  1240  		// the actual blob on disk may have been removed by GC, so sync the cache
  1241  		if err := is.cache.DeleteBlob(digest, dstRecord); err != nil {
  1242  			is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", dstRecord).
  1243  				Msg("failed to remove blob path from cache")
  1244  
  1245  			return "", err
  1246  		}
  1247  
  1248  		return "", zerr.ErrBlobNotFound
  1249  	}
  1250  
  1251  	is.log.Debug().Str("digest", digest.String()).Str("dstRecord", dstRecord).Str("component", "cache").
  1252  		Msg("found dedupe record")
  1253  
  1254  	return dstRecord, nil
  1255  }
  1256  
  1257  func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64, error) {
  1258  	if err := is.initRepo(repo); err != nil {
  1259  		is.log.Error().Err(err).Str("repository", repo).Msg("failed to initialize an empty repo")
  1260  
  1261  		return -1, err
  1262  	}
  1263  
  1264  	_ = is.storeDriver.EnsureDir(filepath.Dir(blobPath))
  1265  
  1266  	if err := is.storeDriver.Link(dstRecord, blobPath); err != nil {
  1267  		is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Str("component", "dedupe").
  1268  			Msg("failed to hard link")
  1269  
  1270  		return -1, zerr.ErrBlobNotFound
  1271  	}
  1272  
  1273  	// return original blob with content instead of the deduped one (blobPath)
  1274  	binfo, err := is.storeDriver.Stat(dstRecord)
  1275  	if err == nil {
  1276  		return binfo.Size(), nil
  1277  	}
  1278  
  1279  	return -1, zerr.ErrBlobNotFound
  1280  }
  1281  
   1282  // GetBlobPartial returns a stream to read only a byte range of the blob
   1283  // instead of downloading the whole blob.
  1284  func (is *ImageStore) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,
  1285  ) (io.ReadCloser, int64, int64, error) {
  1286  	var lockLatency time.Time
  1287  
  1288  	if err := digest.Validate(); err != nil {
  1289  		return nil, -1, -1, err
  1290  	}
  1291  
  1292  	is.RLock(&lockLatency)
  1293  	defer is.RUnlock(&lockLatency)
  1294  
  1295  	binfo, err := is.originalBlobInfo(repo, digest)
  1296  	if err != nil {
  1297  		return nil, -1, -1, err
  1298  	}
  1299  
  1300  	end := to
  1301  
  1302  	if to < 0 || to >= binfo.Size() {
  1303  		end = binfo.Size() - 1
  1304  	}
  1305  
  1306  	blobHandle, err := is.storeDriver.Reader(binfo.Path(), from)
  1307  	if err != nil {
  1308  		is.log.Error().Err(err).Str("blob", binfo.Path()).Msg("failed to open blob")
  1309  
  1310  		return nil, -1, -1, err
  1311  	}
  1312  
  1313  	blobReadCloser, err := newBlobStream(blobHandle, from, end)
  1314  	if err != nil {
  1315  		is.log.Error().Err(err).Str("blob", binfo.Path()).Msg("failed to open blob stream")
  1316  
  1317  		return nil, -1, -1, err
  1318  	}
  1319  
  1320  	// The caller function is responsible for calling Close()
  1321  	return blobReadCloser, end - from + 1, binfo.Size(), nil
  1322  }
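
// Ranged-read sketch: from/to are inclusive byte offsets; a negative "to" (or one past the
// end) means "until the last byte". The second return value is the length of the returned
// range and the third is the total blob size.
//
//	rc, rangeLen, total, err := imgStore.GetBlobPartial("alpine", dgst, mediaType, 0, 1023)
//	if err == nil {
//		defer rc.Close()
//		_ = rangeLen // 1024 for a blob of at least 1024 bytes
//		_ = total
//	}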
  1323  
   1324  /*
   1325  In the case of s3 (which doesn't support hard links) we "link" blobs in our cache by
   1326  keeping a reference to the original blob and its duplicates.
   1327  
   1328  On the storage, original blobs are the ones with content, and duplicates are just empty files.
   1329  This function handles that situation: use it to make sure you always get the original blob.
   1330  */
  1331  func (is *ImageStore) originalBlobInfo(repo string, digest godigest.Digest) (driver.FileInfo, error) {
  1332  	blobPath := is.BlobPath(repo, digest)
  1333  
  1334  	binfo, err := is.storeDriver.Stat(blobPath)
  1335  	if err != nil {
  1336  		is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
  1337  
  1338  		return nil, zerr.ErrBlobNotFound
  1339  	}
  1340  
  1341  	if binfo.Size() == 0 {
  1342  		dstRecord, err := is.checkCacheBlob(digest)
  1343  		if err != nil {
  1344  			is.log.Debug().Err(err).Str("digest", digest.String()).Msg("not found in cache")
  1345  
  1346  			return nil, zerr.ErrBlobNotFound
  1347  		}
  1348  
  1349  		binfo, err = is.storeDriver.Stat(dstRecord)
  1350  		if err != nil {
  1351  			is.log.Error().Err(err).Str("blob", dstRecord).Msg("failed to stat blob")
  1352  
  1353  			return nil, zerr.ErrBlobNotFound
  1354  		}
  1355  	}
  1356  
  1357  	return binfo, nil
  1358  }
  1359  
   1360  // GetBlob returns a stream to read the blob contents.
  1362  func (is *ImageStore) GetBlob(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error) {
  1363  	var lockLatency time.Time
  1364  
  1365  	if err := digest.Validate(); err != nil {
  1366  		return nil, -1, err
  1367  	}
  1368  
  1369  	is.RLock(&lockLatency)
  1370  	defer is.RUnlock(&lockLatency)
  1371  
  1372  	binfo, err := is.originalBlobInfo(repo, digest)
  1373  	if err != nil {
  1374  		return nil, -1, err
  1375  	}
  1376  
  1377  	blobReadCloser, err := is.storeDriver.Reader(binfo.Path(), 0)
  1378  	if err != nil {
  1379  		is.log.Error().Err(err).Str("blob", binfo.Path()).Msg("failed to open blob")
  1380  
  1381  		return nil, -1, err
  1382  	}
  1383  
  1384  	// The caller function is responsible for calling Close()
  1385  	return blobReadCloser, binfo.Size(), nil
  1386  }
  1387  
   1388  // GetBlobContent returns the blob contents; the caller function MUST lock from outside.
   1389  // Should be used for small files (manifests/config blobs).
  1390  func (is *ImageStore) GetBlobContent(repo string, digest godigest.Digest) ([]byte, error) {
  1391  	if err := digest.Validate(); err != nil {
  1392  		return []byte{}, err
  1393  	}
  1394  
  1395  	binfo, err := is.originalBlobInfo(repo, digest)
  1396  	if err != nil {
  1397  		return nil, err
  1398  	}
  1399  
  1400  	blobBuf, err := is.storeDriver.ReadFile(binfo.Path())
  1401  	if err != nil {
  1402  		is.log.Error().Err(err).Str("blob", binfo.Path()).Msg("failed to open blob")
  1403  
  1404  		return nil, err
  1405  	}
  1406  
  1407  	return blobBuf, nil
  1408  }
  1409  
   1410  // VerifyBlobDigestValue verifies that the blob addressed by the given digest has an equivalent computed digest.
  1411  func (is *ImageStore) VerifyBlobDigestValue(repo string, digest godigest.Digest) error {
  1412  	if err := digest.Validate(); err != nil {
  1413  		return err
  1414  	}
  1415  
  1416  	binfo, err := is.originalBlobInfo(repo, digest)
  1417  	if err != nil {
  1418  		return err
  1419  	}
  1420  
  1421  	blobReadCloser, err := is.storeDriver.Reader(binfo.Path(), 0)
  1422  	if err != nil {
  1423  		return err
  1424  	}
  1425  
  1426  	defer blobReadCloser.Close()
  1427  
  1428  	// compute its real digest
  1429  	computedDigest, err := godigest.FromReader(blobReadCloser)
  1430  	if err != nil {
  1431  		return err
  1432  	}
  1433  
   1434  	// if the computed digest is different from the blob name (its initial digest) then the blob has been corrupted.
  1435  	if computedDigest != digest {
  1436  		return zerr.ErrBadBlobDigest
  1437  	}
  1438  
  1439  	return nil
  1440  }
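
// Scrub-style usage sketch: re-hash the stored content and compare it with the digest the
// blob is stored under.
//
//	if err := imgStore.VerifyBlobDigestValue("alpine", dgst); errors.Is(err, zerr.ErrBadBlobDigest) {
//		// the blob content no longer matches its digest (corruption)
//	}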
  1441  
  1442  func (is *ImageStore) GetReferrers(repo string, gdigest godigest.Digest, artifactTypes []string,
  1443  ) (ispec.Index, error) {
  1444  	var lockLatency time.Time
  1445  
  1446  	is.RLock(&lockLatency)
  1447  	defer is.RUnlock(&lockLatency)
  1448  
  1449  	return common.GetReferrers(is, repo, gdigest, artifactTypes, is.log)
  1450  }
  1451  
  1452  // GetIndexContent returns index.json contents, the caller function MUST lock from outside.
  1453  func (is *ImageStore) GetIndexContent(repo string) ([]byte, error) {
  1454  	dir := path.Join(is.rootDir, repo)
  1455  
  1456  	buf, err := is.storeDriver.ReadFile(path.Join(dir, "index.json"))
  1457  	if err != nil {
   1458  		if errors.As(err, &driver.PathNotFoundError{}) {
  1459  			is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
  1460  
  1461  			return []byte{}, zerr.ErrRepoNotFound
  1462  		}
  1463  
  1464  		is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
  1465  
  1466  		return []byte{}, err
  1467  	}
  1468  
  1469  	return buf, nil
  1470  }
  1471  
  1472  func (is *ImageStore) StatIndex(repo string) (bool, int64, time.Time, error) {
  1473  	repoIndexPath := path.Join(is.rootDir, repo, "index.json")
  1474  
  1475  	fileInfo, err := is.storeDriver.Stat(repoIndexPath)
  1476  	if err != nil {
  1477  		if errors.As(err, &driver.PathNotFoundError{}) {
  1478  			is.log.Error().Err(err).Str("indexFile", repoIndexPath).Msg("failed to stat index.json")
  1479  
  1480  			return false, 0, time.Time{}, zerr.ErrRepoNotFound
  1481  		}
  1482  
  1483  		is.log.Error().Err(err).Str("indexFile", repoIndexPath).Msg("failed to read index.json")
  1484  
  1485  		return false, 0, time.Time{}, err
  1486  	}
  1487  
  1488  	return true, fileInfo.Size(), fileInfo.ModTime(), nil
  1489  }
  1490  
  1491  func (is *ImageStore) PutIndexContent(repo string, index ispec.Index) error {
  1492  	dir := path.Join(is.rootDir, repo)
  1493  
  1494  	indexPath := path.Join(dir, "index.json")
  1495  
  1496  	buf, err := json.Marshal(index)
  1497  	if err != nil {
  1498  		is.log.Error().Err(err).Str("file", indexPath).Msg("failed to marshal JSON")
  1499  
  1500  		return err
  1501  	}
  1502  
  1503  	if _, err = is.storeDriver.WriteFile(indexPath, buf); err != nil {
  1504  		is.log.Error().Err(err).Str("file", indexPath).Msg("failed to write")
  1505  
  1506  		return err
  1507  	}
  1508  
  1509  	return nil
  1510  }
  1511  
  1512  // DeleteBlob removes the blob from the repository.
  1513  func (is *ImageStore) DeleteBlob(repo string, digest godigest.Digest) error {
  1514  	var lockLatency time.Time
  1515  
  1516  	if err := digest.Validate(); err != nil {
  1517  		return err
  1518  	}
  1519  
  1520  	is.Lock(&lockLatency)
  1521  	defer is.Unlock(&lockLatency)
  1522  
  1523  	return is.deleteBlob(repo, digest)
  1524  }
  1525  
   1526  /*
   1527  CleanupRepo removes blobs from the repository and removes the repo itself if the flag is true and all blobs were removed;
   1528  the caller function MUST lock from outside.
   1529  */
  1530  func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRepo bool) (int, error) {
  1531  	count := 0
  1532  
  1533  	for _, digest := range blobs {
  1534  		is.log.Debug().Str("repository", repo).
  1535  			Str("digest", digest.String()).Msg("perform GC on blob")
  1536  
  1537  		if err := is.deleteBlob(repo, digest); err != nil {
  1538  			if errors.Is(err, zerr.ErrBlobReferenced) {
  1539  				if err := is.deleteImageManifest(repo, digest.String(), true); err != nil {
  1540  					if errors.Is(err, zerr.ErrManifestConflict) || errors.Is(err, zerr.ErrManifestReferenced) {
  1541  						continue
  1542  					}
  1543  
  1544  					is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete manifest")
  1545  
  1546  					return count, err
  1547  				}
  1548  
  1549  				count++
  1550  			} else {
  1551  				is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete blob")
  1552  
  1553  				return count, err
  1554  			}
  1555  		} else {
  1556  			count++
  1557  		}
  1558  	}
  1559  
  1560  	blobUploads, err := is.storeDriver.List(path.Join(is.RootDir(), repo, storageConstants.BlobUploadDir))
  1561  	if err != nil {
  1562  		is.log.Debug().Str("repository", repo).Msg("failed to list .uploads/ dir")
  1563  	}
  1564  
   1565  	// if the removeRepo flag is true, we cleaned up all blobs and there are no blobs currently being uploaded.
  1566  	if removeRepo && count == len(blobs) && count > 0 && len(blobUploads) == 0 {
  1567  		is.log.Info().Str("repository", repo).Msg("removed all blobs, removing repo")
  1568  
  1569  		if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil {
  1570  			is.log.Error().Err(err).Str("repository", repo).Msg("failed to remove repo")
  1571  
  1572  			return count, err
  1573  		}
  1574  	}
  1575  
  1576  	return count, nil
  1577  }
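
// GC sketch (illustrative): a garbage collector gathers the digests of blobs it considers
// unreferenced for a repository (that logic lives outside this file) and hands them to
// CleanupRepo, optionally asking for the repository itself to be removed once it holds no
// blobs and no in-progress uploads. Per the doc comment above, the caller must already
// hold the write lock.
//
//	removed, err := imgStore.CleanupRepo("alpine", unreferencedDigests, true)
//	_ = removed // number of blobs/manifests actually deleted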
  1578  
  1579  func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
  1580  	blobPath := is.BlobPath(repo, digest)
  1581  
  1582  	_, err := is.storeDriver.Stat(blobPath)
  1583  	if err != nil {
  1584  		is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
  1585  
  1586  		return zerr.ErrBlobNotFound
  1587  	}
  1588  
  1589  	// first check if this blob is not currently in use
  1590  	if ok, _ := common.IsBlobReferenced(is, repo, digest, is.log); ok {
  1591  		return zerr.ErrBlobReferenced
  1592  	}
  1593  
  1594  	if fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
  1595  		dstRecord, err := is.cache.GetBlob(digest)
  1596  		if err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
  1597  			is.log.Error().Err(err).Str("blobPath", dstRecord).Str("component", "dedupe").
  1598  				Msg("failed to lookup blob record")
  1599  
  1600  			return err
  1601  		}
  1602  
  1603  		// remove cache entry and move blob contents to the next candidate if there is any
  1604  		if ok := is.cache.HasBlob(digest, blobPath); ok {
  1605  			if err := is.cache.DeleteBlob(digest, blobPath); err != nil {
  1606  				is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", blobPath).
  1607  					Msg("failed to remove blob path from cache")
  1608  
  1609  				return err
  1610  			}
  1611  		}
  1612  
  1613  		// if the deleted blob is one with content
  1614  		if dstRecord == blobPath {
  1615  			// get next candidate
  1616  			dstRecord, err := is.cache.GetBlob(digest)
  1617  			if err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
  1618  				is.log.Error().Err(err).Str("blobPath", dstRecord).Str("component", "dedupe").
  1619  					Msg("failed to lookup blob record")
  1620  
  1621  				return err
  1622  			}
  1623  
  1624  			// if we have a new candidate move the blob content to it
  1625  			if dstRecord != "" {
   1626  				/* check to see if we need to move the content from the original blob to the duplicate one
   1627  				(in case of filesystem, this should not be needed) */
  1628  				binfo, err := is.storeDriver.Stat(dstRecord)
  1629  				if err != nil {
  1630  					is.log.Error().Err(err).Str("path", blobPath).Str("component", "dedupe").
  1631  						Msg("failed to stat blob")
  1632  
  1633  					return err
  1634  				}
  1635  
  1636  				if binfo.Size() == 0 {
  1637  					if err := is.storeDriver.Move(blobPath, dstRecord); err != nil {
  1638  						is.log.Error().Err(err).Str("blobPath", blobPath).Str("component", "dedupe").
  1639  							Msg("failed to move blob content to the next candidate")
  1640  
  1641  						return err
  1642  					}
  1643  				}
  1644  
  1645  				return nil
  1646  			}
  1647  		}
  1648  	}
  1649  
  1650  	if err := is.storeDriver.Delete(blobPath); err != nil {
  1651  		is.log.Error().Err(err).Str("blobPath", blobPath).Msg("failed to remove blob path")
  1652  
  1653  		return err
  1654  	}
  1655  
  1656  	return nil
  1657  }
  1658  
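        // getBlobDigest streams the file at the given path through the store driver and
        // returns its computed digest.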
  1659  func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) {
  1660  	fileReader, err := imgStore.storeDriver.Reader(path, 0)
  1661  	if err != nil {
  1662  		return "", zerr.ErrUploadNotFound
  1663  	}
  1664  
  1665  	defer fileReader.Close()
  1666  
  1667  	digest, err := godigest.FromReader(fileReader)
  1668  	if err != nil {
  1669  		return "", zerr.ErrBadBlobDigest
  1670  	}
  1671  
  1672  	return digest, nil
  1673  }
  1674  
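        // GetAllBlobs returns the file names (encoded digests) of all blobs stored under
        // <rootDir>/<repo>/blobs/sha256; a missing directory yields an empty slice.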
  1675  func (is *ImageStore) GetAllBlobs(repo string) ([]string, error) {
  1676  	dir := path.Join(is.rootDir, repo, "blobs", "sha256")
  1677  
  1678  	files, err := is.storeDriver.List(dir)
  1679  	if err != nil {
  1680  		if errors.As(err, &driver.PathNotFoundError{}) {
  1681  			is.log.Debug().Str("dir", dir).Msg("blobs directory not found")
  1682  
  1683  			return []string{}, nil
  1684  		}
  1685  
  1686  		return []string{}, err
  1687  	}
  1688  
  1689  	ret := []string{}
  1690  
  1691  	for _, file := range files {
  1692  		ret = append(ret, filepath.Base(file))
  1693  	}
  1694  
  1695  	return ret, nil
  1696  }
  1697  
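        // GetNextDigestWithBlobPaths walks the storage root under a read lock and
        // returns the next blob digest not present in lastDigests, together with all
        // blob paths (across the given repos) that share that digest.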
  1698  func (is *ImageStore) GetNextDigestWithBlobPaths(repos []string, lastDigests []godigest.Digest,
  1699  ) (godigest.Digest, []string, error) {
  1700  	var lockLatency time.Time
  1701  
  1702  	dir := is.rootDir
  1703  
  1704  	is.RLock(&lockLatency)
  1705  	defer is.RUnlock(&lockLatency)
  1706  
  1707  	var duplicateBlobs []string
  1708  
  1709  	var digest godigest.Digest
  1710  
  1711  	err := is.storeDriver.Walk(dir, func(fileInfo driver.FileInfo) error {
  1712  		// skip blobs under .sync and .uploads
  1713  		if strings.HasSuffix(fileInfo.Path(), syncConstants.SyncBlobUploadDir) ||
  1714  			strings.HasSuffix(fileInfo.Path(), storageConstants.BlobUploadDir) {
  1715  			return driver.ErrSkipDir
  1716  		}
  1717  
  1718  		if fileInfo.IsDir() {
  1719  			// skip repositories not found in repos
  1720  			repo := path.Base(fileInfo.Path())
  1721  
  1722  			if !zcommon.Contains(repos, repo) && repo != "blobs" && repo != "sha256" {
  1723  				return driver.ErrSkipDir
  1724  			}
  1725  		}
  1726  
  1727  		blobDigest := godigest.NewDigestFromEncoded("sha256", path.Base(fileInfo.Path()))
  1728  		if err := blobDigest.Validate(); err != nil {
  1729  			return nil //nolint: nilerr // ignore files which are not blobs
  1730  		}
  1731  
  1732  		if digest == "" && !zcommon.Contains(lastDigests, blobDigest) {
  1733  			digest = blobDigest
  1734  		}
  1735  
  1736  		if blobDigest == digest {
  1737  			duplicateBlobs = append(duplicateBlobs, fileInfo.Path())
  1738  		}
  1739  
  1740  		return nil
  1741  	})
  1742  
  1743  	// if the root directory is not yet created
  1744  	var perr driver.PathNotFoundError
  1745  
  1746  	if errors.As(err, &perr) {
  1747  		return digest, duplicateBlobs, nil
  1748  	}
  1749  
  1750  	return digest, duplicateBlobs, err
  1751  }
  1752  
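        // getOriginalBlobFromDisk returns the first path in duplicateBlobs whose file
        // has content (size > 0), i.e. the non-deduped original blob.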
  1753  func (is *ImageStore) getOriginalBlobFromDisk(duplicateBlobs []string) (string, error) {
  1754  	for _, blobPath := range duplicateBlobs {
  1755  		binfo, err := is.storeDriver.Stat(blobPath)
  1756  		if err != nil {
  1757  			is.log.Error().Err(err).Str("path", blobPath).Str("component", "storage").Msg("failed to stat blob")
  1758  
  1759  			return "", zerr.ErrBlobNotFound
  1760  		}
  1761  
  1762  		if binfo.Size() > 0 {
  1763  			return blobPath, nil
  1764  		}
  1765  	}
  1766  
  1767  	return "", zerr.ErrBlobNotFound
  1768  }
  1769  
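        // getOriginalBlob locates the blob path that still holds the actual content for
        // the given digest: first via the dedupe cache, then by scanning the duplicate
        // blob paths on disk.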
  1770  func (is *ImageStore) getOriginalBlob(digest godigest.Digest, duplicateBlobs []string) (string, error) {
  1771  	var originalBlob string
  1772  
  1773  	var err error
  1774  
  1775  	originalBlob, err = is.checkCacheBlob(digest)
  1776  	if err != nil && !errors.Is(err, zerr.ErrBlobNotFound) && !errors.Is(err, zerr.ErrCacheMiss) {
  1777  		is.log.Error().Err(err).Str("component", "dedupe").Msg("failed to find blob in cache")
  1778  
  1779  		return originalBlob, err
  1780  	}
  1781  
  1782  	// if we still don't have it, search for it in storage
  1783  	if originalBlob == "" {
  1784  		is.log.Warn().Str("component", "dedupe").Msg("blob not found in cache, searching for it in storage...")
  1785  		// a dedupe rebuild was attempted in the past, so the original blob
  1786  		// should still be found on disk; otherwise exit with an error
  1787  
  1788  		originalBlob, err = is.getOriginalBlobFromDisk(duplicateBlobs)
  1789  		if err != nil {
  1790  			return originalBlob, err
  1791  		}
  1792  	}
  1793  
  1794  	is.log.Info().Str("originalBlob", originalBlob).Str("component", "dedupe").Msg("found original blob")
  1795  
  1796  	return originalBlob, nil
  1797  }
  1798  
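        // dedupeBlobs links, via the store driver, every duplicate blob of the given
        // digest to a single original blob and records all paths in the dedupe cache.
        // It requires a configured cache driver and honors context cancellation.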
  1799  func (is *ImageStore) dedupeBlobs(ctx context.Context, digest godigest.Digest, duplicateBlobs []string) error {
  1800  	if fmt.Sprintf("%v", is.cache) == fmt.Sprintf("%v", nil) {
  1801  		is.log.Error().Err(zerr.ErrDedupeRebuild).Msg("failed to dedupe blobs, no cache driver found")
  1802  
  1803  		return zerr.ErrDedupeRebuild
  1804  	}
  1805  
  1806  	is.log.Info().Str("digest", digest.String()).Str("component", "dedupe").Msg("deduping blobs for digest")
  1807  
  1808  	var originalBlob string
  1809  
  1810  	// rebuild when switching dedupe from false to true
  1811  	for _, blobPath := range duplicateBlobs {
  1812  		if zcommon.IsContextDone(ctx) {
  1813  			return ctx.Err()
  1814  		}
  1815  
  1816  		binfo, err := is.storeDriver.Stat(blobPath)
  1817  		if err != nil {
  1818  			is.log.Error().Err(err).Str("path", blobPath).Str("component", "dedupe").Msg("failed to stat blob")
  1819  
  1820  			return err
  1821  		}
  1822  
  1823  		if binfo.Size() == 0 {
  1824  			is.log.Warn().Str("component", "dedupe").Msg("found file without content, trying to find the original blob")
  1825  			// a dedupe rebuild was attempted in the past; the original blob
  1826  			// should still be found, otherwise exit with an error
  1827  			if originalBlob == "" {
  1828  				originalBlob, err = is.getOriginalBlob(digest, duplicateBlobs)
  1829  				if err != nil {
  1830  					is.log.Error().Err(err).Str("component", "dedupe").Msg("failed to find original blob")
  1831  
  1832  					return zerr.ErrDedupeRebuild
  1833  				}
  1834  
  1835  				// cache original blob
  1836  				if ok := is.cache.HasBlob(digest, originalBlob); !ok {
  1837  					if err := is.cache.PutBlob(digest, originalBlob); err != nil {
  1838  						return err
  1839  					}
  1840  				}
  1841  			}
  1842  
  1843  			// cache dedupe blob
  1844  			if ok := is.cache.HasBlob(digest, blobPath); !ok {
  1845  				if err := is.cache.PutBlob(digest, blobPath); err != nil {
  1846  					return err
  1847  				}
  1848  			}
  1849  		} else {
  1850  			// if we have an original blob cached then we can safely dedupe the rest of them
  1851  			if originalBlob != "" {
  1852  				if err := is.storeDriver.Link(originalBlob, blobPath); err != nil {
  1853  					is.log.Error().Err(err).Str("path", blobPath).Str("component", "dedupe").Msg("failed to dedupe blob")
  1854  
  1855  					return err
  1856  				}
  1857  			}
  1858  
  1859  			// cache it
  1860  			if ok := is.cache.HasBlob(digest, blobPath); !ok {
  1861  				if err := is.cache.PutBlob(digest, blobPath); err != nil {
  1862  					return err
  1863  				}
  1864  			}
  1865  
  1866  			// remember this blob as the original for subsequent duplicates
  1867  			originalBlob = blobPath
  1868  		}
  1869  	}
  1870  
  1871  	is.log.Info().Str("digest", digest.String()).Str("component", "dedupe").
  1872  		Msg("deduping blobs for digest finished successfully")
  1873  
  1874  	return nil
  1875  }
  1876  
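        // restoreDedupedBlobs undoes deduplication for the given digest by copying the
        // original blob content back into every zero-sized duplicate blob path.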
  1877  func (is *ImageStore) restoreDedupedBlobs(ctx context.Context, digest godigest.Digest, duplicateBlobs []string) error {
  1878  	is.log.Info().Str("digest", digest.String()).Str("component", "dedupe").Msg("restoring deduped blobs for digest")
  1879  
  1880  	// first we need to find the original blob, either in cache or by checking each blob size
  1881  	originalBlob, err := is.getOriginalBlob(digest, duplicateBlobs)
  1882  	if err != nil {
  1883  		is.log.Error().Err(err).Str("component", "dedupe").Msg("failed to find original blob")
  1884  
  1885  		return zerr.ErrDedupeRebuild
  1886  	}
  1887  
  1888  	for _, blobPath := range duplicateBlobs {
  1889  		if zcommon.IsContextDone(ctx) {
  1890  			return ctx.Err()
  1891  		}
  1892  
  1893  		binfo, err := is.storeDriver.Stat(blobPath)
  1894  		if err != nil {
  1895  			is.log.Error().Err(err).Str("path", blobPath).Str("component", "dedupe").Msg("failed to stat blob")
  1896  
  1897  			return err
  1898  		}
  1899  
  1900  		// if we find a deduped (zero-sized) blob, copy the original blob content into it
  1901  		if binfo.Size() == 0 {
  1902  			// copy content from the original blob into the deduped one
  1903  			buf, err := is.storeDriver.ReadFile(originalBlob)
  1904  			if err != nil {
  1905  				is.log.Error().Err(err).Str("path", originalBlob).Str("component", "dedupe").
  1906  					Msg("failed to get original blob content")
  1907  
  1908  				return err
  1909  			}
  1910  
  1911  			_, err = is.storeDriver.WriteFile(blobPath, buf)
  1912  			if err != nil {
  1913  				return err
  1914  			}
  1915  		}
  1916  	}
  1917  
  1918  	is.log.Info().Str("digest", digest.String()).
  1919  		Str("component", "dedupe").Msg("restoring deduped blobs for digest finished successfully")
  1920  
  1921  	return nil
  1922  }
  1923  
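        // RunDedupeForDigest dedupes (or, if dedupe is false, restores) all blob paths
        // sharing the given digest, holding the store write lock for the whole operation.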
  1924  func (is *ImageStore) RunDedupeForDigest(ctx context.Context, digest godigest.Digest, dedupe bool,
  1925  	duplicateBlobs []string,
  1926  ) error {
  1927  	var lockLatency time.Time
  1928  
  1929  	is.Lock(&lockLatency)
  1930  	defer is.Unlock(&lockLatency)
  1931  
  1932  	if dedupe {
  1933  		return is.dedupeBlobs(ctx, digest, duplicateBlobs)
  1934  	}
  1935  
  1936  	return is.restoreDedupedBlobs(ctx, digest, duplicateBlobs)
  1937  }
  1938  
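        // RunDedupeBlobs submits a dedupe task generator to the scheduler so blobs are
        // (re)deduped in the background at the given interval with medium priority.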
  1939  func (is *ImageStore) RunDedupeBlobs(interval time.Duration, sch *scheduler.Scheduler) {
  1940  	generator := &common.DedupeTaskGenerator{
  1941  		ImgStore: is,
  1942  		Dedupe:   is.dedupe,
  1943  		Log:      is.log,
  1944  	}
  1945  
  1946  	sch.SubmitGenerator(generator, interval, scheduler.MediumPriority)
  1947  }
  1948  
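        // PopulateStorageMetrics submits a generator that initializes storage metrics
        // for this image store, scheduled at the given interval with high priority.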
  1949  func (is *ImageStore) PopulateStorageMetrics(interval time.Duration, sch *scheduler.Scheduler) {
  1950  	generator := &common.StorageMetricsInitGenerator{
  1951  		ImgStore: is,
  1952  		Metrics:  is.metrics,
  1953  		Log:      is.log,
  1954  		MaxDelay: 15, //nolint:gomnd
  1955  	}
  1956  
  1957  	sch.SubmitGenerator(generator, interval, scheduler.HighPriority)
  1958  }
  1959  
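        // blobStream couples a range-limited reader with the underlying closer so the
        // consumer can read at most the requested byte range and still close the source.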
  1960  type blobStream struct {
  1961  	reader io.Reader
  1962  	closer io.Closer
  1963  }
  1964  
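        // newBlobStream wraps readCloser in a ReadCloser limited to to-from+1 bytes;
        // it assumes the caller has already positioned the underlying reader at
        // offset 'from'. A negative 'from' or a 'to' smaller than 'from' yields ErrBadRange.
        //
        // Illustrative (hypothetical) caller serving a ranged blob read:
        //
        //	rc, _ := is.storeDriver.Reader(blobPath, from)
        //	stream, err := newBlobStream(rc, from, to)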
  1965  func newBlobStream(readCloser io.ReadCloser, from, to int64) (io.ReadCloser, error) {
  1966  	if from < 0 || to < from {
  1967  		return nil, zerr.ErrBadRange
  1968  	}
  1969  
  1970  	return &blobStream{reader: io.LimitReader(readCloser, to-from+1), closer: readCloser}, nil
  1971  }
  1972  
  1973  func (bs *blobStream) Read(buf []byte) (int, error) {
  1974  	return bs.reader.Read(buf)
  1975  }
  1976  
  1977  func (bs *blobStream) Close() error {
  1978  	return bs.closer.Close()
  1979  }