github.com/lusis/distribution@v2.0.1+incompatible/registry/storage/layercache.go

package storage

import (
	"expvar"
	"sync/atomic"
	"time"

	"github.com/docker/distribution"
	ctxu "github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/driver"
	"golang.org/x/net/context"
)

// cachedLayerService implements the layer service with path-aware caching,
// using a LayerInfoCache interface.
type cachedLayerService struct {
	distribution.LayerService // upstream layer service
	repository                distribution.Repository
	ctx                       context.Context
	driver                    driver.StorageDriver
	*blobStore                // global blob store
	cache                     cache.LayerInfoCache
}
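
// A minimal construction sketch (hypothetical names; the concrete wiring
// lives in the repository implementation that assembles these services):
//
//	layers := &cachedLayerService{
//		LayerService: upstream,      // the uncached layer service
//		repository:   repo,
//		ctx:          ctx,
//		driver:       storageDriver,
//		blobStore:    bs,
//		cache:        infoCache,     // e.g. an in-memory LayerInfoCache
//	}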

// Exists first checks the cache for the digest, returning immediately on a
// hit for the repository. On a miss or cache error, the upstream layer
// service is consulted, and a positive result is written back into the
// cache.
func (lc *cachedLayerService) Exists(dgst digest.Digest) (bool, error) {
	ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Exists(%q)", dgst)
	now := time.Now()
	defer func() {
		// TODO(stevvooe): Replace this with a decent context-based metrics solution
		ctxu.GetLoggerWithField(lc.ctx, "blob.exists.duration", time.Since(now)).
			Infof("(*cachedLayerService).Exists(%q)", dgst)
	}()

	atomic.AddUint64(&layerInfoCacheMetrics.Exists.Requests, 1)
	available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst)
	if err != nil {
		ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err)
		goto fallback
	}

	if available {
		atomic.AddUint64(&layerInfoCacheMetrics.Exists.Hits, 1)
		return true, nil
	}

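	// fallback: the cache was unavailable or reported a miss; consult the
	// upstream layer service and, on a positive result, repopulate the
	// cache below.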
fallback:
	atomic.AddUint64(&layerInfoCacheMetrics.Exists.Misses, 1)
	exists, err := lc.LayerService.Exists(dgst)
	if err != nil {
		return exists, err
	}

	if exists {
		// We only cache a positive existence result.
		if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil {
			ctxu.GetLogger(lc.ctx).Errorf("error adding %v@%v to cache: %v", lc.repository.Name(), dgst, err)
		}
	}

	return exists, nil
}

// Fetch checks for the availability of the layer in the repository via the
// cache. If present, the metadata is resolved and the layer is returned. If
// any operation fails, the layer is read directly from the upstream. The
// results are cached, if possible.
func (lc *cachedLayerService) Fetch(dgst digest.Digest) (distribution.Layer, error) {
	ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Fetch(%q)", dgst)
	now := time.Now()
	defer func() {
		ctxu.GetLoggerWithField(lc.ctx, "blob.fetch.duration", time.Since(now)).
			Infof("(*cachedLayerService).Fetch(%q)", dgst)
	}()

	atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Requests, 1)
	available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst)
	if err != nil {
		ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err)
		goto fallback
	}

	if available {
		// Fast path: get the layer info from the cache and return.
		meta, err := lc.cache.Meta(lc.ctx, dgst)
		if err != nil {
			ctxu.GetLogger(lc.ctx).Errorf("error fetching %v@%v from cache: %v", lc.repository.Name(), dgst, err)
			goto fallback
		}

		atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Hits, 1)
		return newLayerReader(lc.driver, dgst, meta.Path, meta.Length)
	}

	// NOTE(stevvooe): Unfortunately, the cache here only makes checks for
	// existing layers faster. We'd have to provide more careful
	// synchronization with the backend to make the missing case as fast.

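	// fallback: the cache lookup failed or missed; fetch from the upstream
	// layer service, then repair the cache entries for this layer.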
fallback:
	atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Misses, 1)
	layer, err := lc.LayerService.Fetch(dgst)
	if err != nil {
		return nil, err
	}

	// Record the repository-layer relationship in the cache.
	if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil {
		ctxu.GetLogger(lc.ctx).
			Errorf("error caching repository relationship for %v@%v: %v", lc.repository.Name(), dgst, err)
	}

	// Look up the layer path and add it to the cache, if that succeeds. Note
	// that we still return the layer even if we have trouble caching it.
	if path, err := lc.resolveLayerPath(layer); err != nil {
		ctxu.GetLogger(lc.ctx).
			Errorf("error resolving path while caching %v@%v: %v", lc.repository.Name(), dgst, err)
	} else {
		// Add the layer meta to the cache once we've resolved the path.
		if err := lc.cache.SetMeta(lc.ctx, dgst, cache.LayerMeta{Path: path, Length: layer.Length()}); err != nil {
			ctxu.GetLogger(lc.ctx).Errorf("error adding meta for %v@%v to cache: %v", lc.repository.Name(), dgst, err)
		}
	}

	return layer, nil
}

// resolveLayerPath resolves the storage path for the given layer, attempting
// to get the path information from either the concrete object or by
// resolving the primary blob store path.
func (lc *cachedLayerService) resolveLayerPath(layer distribution.Layer) (path string, err error) {
	// Try to resolve the concrete type and driver, so we don't have to
	// traverse links.
	if v, ok := layer.(*layerReader); ok {
		// Only use the path if we have the same driver instance.
		if v.driver == lc.driver {
			return v.path, nil
		}
	}

	ctxu.GetLogger(lc.ctx).Warnf("resolving layer path during cache lookup (%v@%v)", lc.repository.Name(), layer.Digest())
	// We have to do an expensive stat to resolve the layer location, but
	// there is no need to check the link, since we already have a layer
	// instance for this repository.
	bp, err := lc.blobStore.path(layer.Digest())
	if err != nil {
		return "", err
	}

	return bp, nil
}

// layerInfoCacheMetrics keeps track of cache metrics for layer info cache
// requests. Note that this is kept globally and made available via expvar.
// For more detailed metrics, it's recommended to instrument a particular
// cache implementation.
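//
// The counters can be read through the standard expvar facility. A minimal
// sketch, assuming the expvar handler is mounted (importing the "expvar"
// package registers it on http.DefaultServeMux):
//
//	// GET /debug/vars, then inspect registry.cache.storage.layerinfo:
//	// {"Exists": {"Requests": ..., "Hits": ..., "Misses": ...}, "Fetch": {...}}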
var layerInfoCacheMetrics struct {
	// Exists tracks cache metrics for calls to Exists.
	Exists struct {
		Requests uint64
		Hits     uint64
		Misses   uint64
	}

	// Fetch tracks cache metrics for calls to Fetch.
	Fetch struct {
		Requests uint64
		Hits     uint64
		Misses   uint64
	}
}

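// init publishes layerInfoCacheMetrics via expvar under the map path
// registry.cache.storage.layerinfo, creating each intermediate expvar.Map
// only when another package has not already registered it.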
func init() {
	registry := expvar.Get("registry")
	if registry == nil {
		registry = expvar.NewMap("registry")
	}

	cache := registry.(*expvar.Map).Get("cache")
	if cache == nil {
		cache = &expvar.Map{}
		cache.(*expvar.Map).Init()
		registry.(*expvar.Map).Set("cache", cache)
	}

	storage := cache.(*expvar.Map).Get("storage")
	if storage == nil {
		storage = &expvar.Map{}
		storage.(*expvar.Map).Init()
		cache.(*expvar.Map).Set("storage", storage)
	}

	storage.(*expvar.Map).Set("layerinfo", expvar.Func(func() interface{} {
		// No need for synchronized access here: the increments are atomic,
		// and when reading we don't care whether the data is perfectly
		// current. The numbers will always *eventually* be reported
		// correctly.
		return layerInfoCacheMetrics
	}))
}