github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/util/cache/cache.go

     1  // Copyright 2014 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  //
    11  // This code is based on: https://github.com/golang/groupcache/
    12  
    13  package cache
    14  
    15  import (
    16  	"bytes"
    17  	"context"
    18  	"fmt"
    19  	"sync/atomic"
    20  
    21  	"github.com/biogo/store/llrb"
    22  	"github.com/cockroachdb/cockroach/pkg/util/interval"
    23  	"github.com/cockroachdb/cockroach/pkg/util/log"
    24  )
    25  
    26  // EvictionPolicy is the cache eviction policy enum.
    27  type EvictionPolicy int
    28  
    29  // Constants describing the LRU, FIFO, and None cache eviction policies,
    30  // respectively.
    31  const (
    32  	CacheLRU  EvictionPolicy = iota // Least recently used
    33  	CacheFIFO                       // First in, first out
    34  	CacheNone                       // No evictions; don't maintain ordering list
    35  )
    36  
    37  // A Config specifies the eviction policy, eviction
    38  // trigger callback, and eviction listener callback.
    39  type Config struct {
    40  	// Policy is one of the consts listed for EvictionPolicy.
    41  	Policy EvictionPolicy
    42  
    43  	// ShouldEvict is a callback function executed each time a new entry
    44  	// is added to the cache. It supplies the cache size and the potential
    45  	// evictee's key and value. The function should return true if the
    46  	// entry may be evicted; false otherwise. For example, to support a
    47  	// maximum size for the cache, use a function like:
    48  	//
    49  	//   func(size int, key, value interface{}) bool { return size > maxSize }
    50  	//
    51  	// To support a max TTL in the cache, use something like:
    52  	//
    53  	//   func(size int, key, value interface{}) bool {
    54  	//     return timeutil.Now().UnixNano() - value.(int64) > maxTTLNanos
    55  	//   }
    56  	ShouldEvict func(size int, key, value interface{}) bool
    57  
    58  	// OnEvicted optionally specifies a callback function to be
    59  	// executed when an entry is purged from the cache.
    60  	OnEvicted func(key, value interface{})
    61  
    62  	// OnEvictedEntry is like OnEvicted, but the callback receives the
    63  	// purged *Entry itself rather than just its key and value.
    64  	OnEvictedEntry func(entry *Entry)
    65  }
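
        // As a sketch of how the fields above fit together, a size-bounded
        // LRU cache could be configured as follows (maxSize and the sample
        // keys/values are illustrative, not part of this package):
        //
        //   const maxSize = 128
        //
        //   c := NewUnorderedCache(Config{
        //     Policy: CacheLRU,
        //     ShouldEvict: func(size int, key, value interface{}) bool {
        //       return size > maxSize
        //     },
        //     OnEvicted: func(key, value interface{}) {
        //       // Release any resources tied to the purged value here.
        //     },
        //   })
        //   c.Add("a", 1)
        //   if v, ok := c.Get("a"); ok {
        //     _ = v // v == 1
        //   }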
    66  
    67  // Entry holds the key and value and a pointer to the linked list
    68  // which defines the eviction ordering.
    69  type Entry struct {
    70  	Key, Value interface{}
    71  	next, prev *Entry
    72  }
    73  
    74  func (e Entry) String() string {
    75  	return fmt.Sprintf("%s", e.Key)
    76  }
    77  
    78  // Compare implements the llrb.Comparable interface for cache entries.
    79  // This facility is used by the OrderedCache, and crucially requires
    80  // that keys used with that cache implement llrb.Comparable.
    81  func (e *Entry) Compare(b llrb.Comparable) int {
    82  	return e.Key.(llrb.Comparable).Compare(b.(*Entry).Key.(llrb.Comparable))
    83  }
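
        // For example, a minimal byte-slice key that satisfies llrb.Comparable
        // (and can therefore be used with OrderedCache) could look like the
        // following sketch; the bytesKey name is illustrative only:
        //
        //   type bytesKey []byte
        //
        //   func (k bytesKey) Compare(b llrb.Comparable) int {
        //     return bytes.Compare(k, b.(bytesKey))
        //   }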
    84  
    85  // The following methods implement interval.Interface for Entry by asserting
    86  // the entry key to an *IntervalKey and calling the appropriate accessors.
    87  
    88  // ID implements interval.Interface
    89  func (e *Entry) ID() uintptr {
    90  	return e.Key.(*IntervalKey).id
    91  }
    92  
    93  // Range implements interval.Interface
    94  func (e *Entry) Range() interval.Range {
    95  	return e.Key.(*IntervalKey).Range
    96  }
    97  
    98  // entryList is a doubly-linked circular list of *Entry elements. The code is
    99  // derived from the stdlib container/list but customized to Entry in order to
   100  // avoid a separate allocation for every element.
   101  type entryList struct {
   102  	root Entry
   103  }
   104  
   105  func (l *entryList) init() {
   106  	l.root.next = &l.root
   107  	l.root.prev = &l.root
   108  }
   109  
   110  func (l *entryList) back() *Entry {
   111  	return l.root.prev
   112  }
   113  
   114  func (l *entryList) insertAfter(e, at *Entry) {
   115  	n := at.next
   116  	at.next = e
   117  	e.prev = at
   118  	e.next = n
   119  	n.prev = e
   120  }
   121  
   122  func (l *entryList) insertBefore(e, mark *Entry) {
   123  	l.insertAfter(e, mark.prev)
   124  }
   125  
   126  func (l *entryList) remove(e *Entry) *Entry {
   127  	if e == &l.root {
   128  		panic("cannot remove root list node")
   129  	}
   130  	// TODO(peter): Revert this protection against removing a non-existent entry
   131  	// from the list when the cause of
   132  	// https://github.com/cockroachdb/cockroach/issues/6190 is determined. Should
   133  	// be replaced with an explicit panic instead of the implicit one of a
   134  	// nil-pointer dereference.
   135  	if e.next != nil {
   136  		e.prev.next = e.next
   137  		e.next.prev = e.prev
   138  		e.next = nil // avoid memory leaks
   139  		e.prev = nil // avoid memory leaks
   140  	}
   141  	return e
   142  }
   143  
   144  func (l *entryList) pushFront(e *Entry) {
   145  	l.insertAfter(e, &l.root)
   146  }
   147  
   148  func (l *entryList) moveToFront(e *Entry) {
   149  	if l.root.next == e {
   150  		return
   151  	}
   152  	l.insertAfter(l.remove(e), &l.root)
   153  }
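
        // The list is maintained with the most recently added or accessed
        // entry at the front and the next eviction candidate at the back,
        // e.g.:
        //
        //   var l entryList
        //   l.init()
        //   a, b := &Entry{Key: "a"}, &Entry{Key: "b"}
        //   l.pushFront(a)
        //   l.pushFront(b)   // front-to-back order: b, a
        //   l.moveToFront(a) // front-to-back order: a, b
        //   _ = l.back()     // returns b, the next eviction candidate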
   154  
   155  // cacheStore is an interface for the backing store used for the cache.
   156  type cacheStore interface {
   157  	// init initializes or clears all entries.
   158  	init()
   159  	// get returns the entry by key.
   160  	get(key interface{}) *Entry
   161  	// add stores an entry.
   162  	add(e *Entry)
   163  	// del removes an entry.
   164  	del(e *Entry)
   165  	// length returns the number of items in the store.
   166  	length() int
   167  }
   168  
   169  // baseCache contains the config, cacheStore interface, and the linked
   170  // list for eviction order.
   171  type baseCache struct {
   172  	Config
   173  	store cacheStore
   174  	ll    entryList
   175  }
   176  
   177  func newBaseCache(config Config) baseCache {
   178  	return baseCache{
   179  		Config: config,
   180  	}
   181  }
   182  
   183  // init initializes the baseCache with the provided cacheStore. It must be
   184  // called with a non-nil cacheStore before use of the cache.
   185  func (bc *baseCache) init(store cacheStore) {
   186  	bc.ll.init()
   187  	bc.store = store
   188  	bc.store.init()
   189  }
   190  
   191  // Add adds a value to the cache.
   192  func (bc *baseCache) Add(key, value interface{}) {
   193  	bc.add(key, value, nil, nil)
   194  }
   195  
   196  // AddEntry adds a value to the cache. It provides an alternative interface to
   197  // Add which the caller can use to reduce allocations by bundling the Entry
   198  // structure with the key and value to be stored.
   199  func (bc *baseCache) AddEntry(entry *Entry) {
   200  	bc.add(entry.Key, entry.Value, entry, nil)
   201  }
   202  
   203  // AddEntryAfter adds a value to the cache, making sure that it is placed
   204  // after the provided entry in the eviction queue. It provides an alternative
   205  // interface to Add which the caller can use to reduce allocations by bundling
   206  // the Entry structure with the key and value to be stored.
   207  func (bc *baseCache) AddEntryAfter(entry, after *Entry) {
   208  	bc.add(entry.Key, entry.Value, entry, after)
   209  }
   210  
   211  // MoveToEnd moves the entry to the end of the eviction queue.
   212  func (bc *baseCache) MoveToEnd(entry *Entry) {
   213  	bc.ll.moveToFront(entry)
   214  }
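
        // A caller that wants to avoid the per-Add Entry allocation can supply
        // the Entry itself, as in the following sketch (c stands for a cache
        // built with one of the New*Cache constructors defined later in this
        // file; the key and value are illustrative):
        //
        //   e := &Entry{Key: "k", Value: "v"}
        //   c.AddEntry(e)  // reuses e instead of allocating a new Entry
        //   c.MoveToEnd(e) // makes e the last candidate for eviction
        //   c.DelEntry(e)  // removes e without a store lookup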
   215  
   216  func (bc *baseCache) add(key, value interface{}, entry, after *Entry) {
   217  	if e := bc.store.get(key); e != nil {
   218  		bc.access(e)
   219  		e.Value = value
   220  		return
   221  	}
   222  	e := entry
   223  	if e == nil {
   224  		e = &Entry{Key: key, Value: value}
   225  	}
   226  	if after != nil {
   227  		bc.ll.insertBefore(e, after)
   228  	} else {
   229  		bc.ll.pushFront(e)
   230  	}
   231  	bc.store.add(e)
   232  	// Evict as many elements as we can.
   233  	for bc.evict() {
   234  	}
   235  }
   236  
   237  // Get looks up a key's value from the cache.
   238  func (bc *baseCache) Get(key interface{}) (value interface{}, ok bool) {
   239  	if e := bc.store.get(key); e != nil {
   240  		bc.access(e)
   241  		return e.Value, true
   242  	}
   243  	return
   244  }
   245  
   246  // Del removes the provided key from the cache.
   247  func (bc *baseCache) Del(key interface{}) {
   248  	e := bc.store.get(key)
   249  	bc.DelEntry(e)
   250  }
   251  
   252  // DelEntry removes the provided entry from the cache.
   253  func (bc *baseCache) DelEntry(entry *Entry) {
   254  	if entry != nil {
   255  		bc.removeElement(entry)
   256  	}
   257  }
   258  
   259  // Clear clears all entries from the cache.
   260  func (bc *baseCache) Clear() {
   261  	if bc.OnEvicted != nil || bc.OnEvictedEntry != nil {
   262  		for e := bc.ll.back(); e != &bc.ll.root; e = e.prev {
   263  			if bc.OnEvicted != nil {
   264  				bc.OnEvicted(e.Key, e.Value)
   265  			}
   266  			if bc.OnEvictedEntry != nil {
   267  				bc.OnEvictedEntry(e)
   268  			}
   269  		}
   270  	}
   271  	bc.ll.init()
   272  	bc.store.init()
   273  }
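
        // For instance, a cache configured with an eviction listener sees the
        // listener fire once per entry (oldest first) when Clear is called
        // (sketch):
        //
        //   c := NewUnorderedCache(Config{
        //     Policy: CacheFIFO,
        //     OnEvictedEntry: func(e *Entry) {
        //       fmt.Printf("purged %v=%v\n", e.Key, e.Value)
        //     },
        //   })
        //   c.Add("a", 1)
        //   c.Add("b", 2)
        //   c.Clear() // prints "purged a=1", then "purged b=2"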
   274  
   275  // Len returns the number of items in the cache.
   276  func (bc *baseCache) Len() int {
   277  	return bc.store.length()
   278  }
   279  
   280  func (bc *baseCache) access(e *Entry) {
   281  	if bc.Policy == CacheLRU {
   282  		bc.ll.moveToFront(e)
   283  	}
   284  }
   285  
   286  func (bc *baseCache) removeElement(e *Entry) {
   287  	bc.ll.remove(e)
   288  	bc.store.del(e)
   289  	if bc.OnEvicted != nil {
   290  		bc.OnEvicted(e.Key, e.Value)
   291  	}
   292  	if bc.OnEvictedEntry != nil {
   293  		bc.OnEvictedEntry(e)
   294  	}
   295  }
   296  
   297  // evict removes the oldest item from the cache for FIFO and
   298  // the least recently used item for LRU. Returns true if an
   299  // entry was evicted, false otherwise.
   300  func (bc *baseCache) evict() bool {
   301  	if bc.ShouldEvict == nil || bc.Policy == CacheNone {
   302  		return false
   303  	}
   304  	l := bc.store.length()
   305  	if l > 0 {
   306  		e := bc.ll.back()
   307  		if bc.ShouldEvict(l, e.Key, e.Value) {
   308  			bc.removeElement(e)
   309  			return true
   310  		}
   311  	}
   312  	return false
   313  }
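
        // Because evict only ever examines the entry at the back of the queue,
        // the eviction loop in add drains expired entries one at a time until
        // ShouldEvict returns false. A TTL cache along the lines of the
        // ShouldEvict example above might look like this sketch (maxTTLNanos
        // is an assumed constant; timeutil is the util/timeutil package
        // referenced in that example):
        //
        //   c := NewUnorderedCache(Config{
        //     Policy: CacheFIFO,
        //     ShouldEvict: func(size int, key, value interface{}) bool {
        //       return timeutil.Now().UnixNano()-value.(int64) > maxTTLNanos
        //     },
        //   })
        //   c.Add("k", timeutil.Now().UnixNano())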
   314  
   315  // UnorderedCache is a cache which supports custom eviction triggers and two
   316  // eviction policies: LRU and FIFO. A listener pattern is available
   317  // for eviction events. This cache uses a hashmap to store elements, making
   318  // it the most performant cache in this package. Only exact lookups are supported.
   319  //
   320  // UnorderedCache requires that keys are comparable, according to the go
   321  // specification (http://golang.org/ref/spec#Comparison_operators).
   322  //
   323  // UnorderedCache is not safe for concurrent access.
   324  type UnorderedCache struct {
   325  	baseCache
   326  	hmap map[interface{}]interface{}
   327  }
   328  
   329  // NewUnorderedCache creates a new UnorderedCache backed by a hash map.
   330  func NewUnorderedCache(config Config) *UnorderedCache {
   331  	mc := &UnorderedCache{
   332  		baseCache: newBaseCache(config),
   333  	}
   334  	mc.baseCache.init(mc)
   335  	return mc
   336  }
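
        // Any key type that is comparable under the Go spec works, including
        // composite struct keys, as in this sketch (the tableKey type is
        // illustrative only):
        //
        //   type tableKey struct {
        //     db, table string
        //   }
        //
        //   c := NewUnorderedCache(Config{Policy: CacheLRU})
        //   c.Add(tableKey{"db1", "users"}, 42)
        //   if v, ok := c.Get(tableKey{"db1", "users"}); ok {
        //     _ = v // v == 42
        //   }
        //   c.Del(tableKey{"db1", "users"})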
   337  
   338  // Implementation of cacheStore interface.
   339  func (mc *UnorderedCache) init() {
   340  	mc.hmap = make(map[interface{}]interface{})
   341  }
   342  func (mc *UnorderedCache) get(key interface{}) *Entry {
   343  	if e, ok := mc.hmap[key].(*Entry); ok {
   344  		return e
   345  	}
   346  	return nil
   347  }
   348  func (mc *UnorderedCache) add(e *Entry) {
   349  	mc.hmap[e.Key] = e
   350  }
   351  func (mc *UnorderedCache) del(e *Entry) {
   352  	delete(mc.hmap, e.Key)
   353  }
   354  func (mc *UnorderedCache) length() int {
   355  	return len(mc.hmap)
   356  }
   357  
   358  // OrderedCache is a cache which supports binary searches using Ceil
   359  // and Floor methods. It is backed by a left-leaning red-black tree.
   360  // See comments in UnorderedCache for more details on cache functionality.
   361  //
   362  // OrderedCache requires that keys implement llrb.Comparable.
   363  //
   364  // OrderedCache is not safe for concurrent access.
   365  type OrderedCache struct {
   366  	baseCache
   367  	llrb llrb.Tree
   368  }
   369  
   370  // NewOrderedCache creates a new OrderedCache backed by a left-leaning
   371  // red-black tree which supports binary searches via the Ceil() and
   372  // Floor() methods. See NewUnorderedCache() for details on parameters.
   373  func NewOrderedCache(config Config) *OrderedCache {
   374  	oc := &OrderedCache{
   375  		baseCache: newBaseCache(config),
   376  	}
   377  	oc.baseCache.init(oc)
   378  	return oc
   379  }
   380  
   381  // Implementation of cacheStore interface.
   382  func (oc *OrderedCache) init() {
   383  	oc.llrb = llrb.Tree{}
   384  }
   385  func (oc *OrderedCache) get(key interface{}) *Entry {
   386  	if e, ok := oc.llrb.Get(&Entry{Key: key}).(*Entry); ok {
   387  		return e
   388  	}
   389  	return nil
   390  }
   391  func (oc *OrderedCache) add(e *Entry) {
   392  	oc.llrb.Insert(e)
   393  }
   394  func (oc *OrderedCache) del(e *Entry) {
   395  	oc.llrb.Delete(e)
   396  }
   397  func (oc *OrderedCache) length() int {
   398  	return oc.llrb.Len()
   399  }
   400  
   401  // CeilEntry returns the smallest cache entry greater than or equal to key.
   402  func (oc *OrderedCache) CeilEntry(key interface{}) (*Entry, bool) {
   403  	if e, ok := oc.llrb.Ceil(&Entry{Key: key}).(*Entry); ok {
   404  		return e, true
   405  	}
   406  	return nil, false
   407  }
   408  
   409  // Ceil returns the smallest key-value pair greater than or equal to key.
   410  func (oc *OrderedCache) Ceil(key interface{}) (interface{}, interface{}, bool) {
   411  	if e, ok := oc.CeilEntry(key); ok {
   412  		return e.Key, e.Value, true
   413  	}
   414  	return nil, nil, false
   415  }
   416  
   417  // FloorEntry returns the greatest cache entry less than or equal to key.
   418  func (oc *OrderedCache) FloorEntry(key interface{}) (*Entry, bool) {
   419  	if e, ok := oc.llrb.Floor(&Entry{Key: key}).(*Entry); ok {
   420  		return e, true
   421  	}
   422  	return nil, false
   423  }
   424  
   425  // Floor returns the greatest key-value pair less than or equal to key.
   426  func (oc *OrderedCache) Floor(key interface{}) (interface{}, interface{}, bool) {
   427  	if e, ok := oc.FloorEntry(key); ok {
   428  		return e.Key, e.Value, true
   429  	}
   430  	return nil, nil, false
   431  }
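
        // With a key type that implements llrb.Comparable (such as the
        // illustrative bytesKey sketched near Entry.Compare above), Ceil and
        // Floor perform ordered lookups around a key that need not be present:
        //
        //   oc := NewOrderedCache(Config{Policy: CacheNone})
        //   oc.Add(bytesKey("b"), 2)
        //   oc.Add(bytesKey("d"), 4)
        //   if k, v, ok := oc.Ceil(bytesKey("c")); ok {
        //     _, _ = k, v // bytesKey("d"), 4
        //   }
        //   if k, v, ok := oc.Floor(bytesKey("c")); ok {
        //     _, _ = k, v // bytesKey("b"), 2
        //   }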
   432  
   433  // DoEntry invokes f on all cache entries in the cache. f returns a boolean
   434  // indicating whether the traversal is done. If f returns true, the DoEntry
   435  // loop exits; otherwise it continues. DoEntry returns whether the iteration
   436  // exited early.
   437  func (oc *OrderedCache) DoEntry(f func(e *Entry) bool) bool {
   438  	return oc.llrb.Do(func(e llrb.Comparable) bool {
   439  		return f(e.(*Entry))
   440  	})
   441  }
   442  
   443  // Do invokes f on all key-value pairs in the cache. f returns a boolean
   444  // indicating whether the traversal is done. If f returns true, the Do loop
   445  // exits; otherwise it continues. Do returns whether the iteration exited early.
   446  func (oc *OrderedCache) Do(f func(k, v interface{}) bool) bool {
   447  	return oc.DoEntry(func(e *Entry) bool {
   448  		return f(e.Key, e.Value)
   449  	})
   450  }
   451  
   452  // DoRangeEntry invokes f on all cache entries in the range from -> to. f
   453  // returns a boolean indicating whether the traversal is done. If f returns
   454  // true, the DoRangeEntry loop exits; otherwise it continues. DoRangeEntry
   455  // returns whether the iteration exited early.
   456  func (oc *OrderedCache) DoRangeEntry(f func(e *Entry) bool, from, to interface{}) bool {
   457  	return oc.llrb.DoRange(func(e llrb.Comparable) bool {
   458  		return f(e.(*Entry))
   459  	}, &Entry{Key: from}, &Entry{Key: to})
   460  }
   461  
   462  // DoRange invokes f on all key-value pairs in the range from -> to. f returns
   463  // a boolean indicating whether the traversal is done. If f returns true, the
   464  // DoRange loop exits; otherwise it continues. DoRange returns whether the
   465  // iteration exited early.
   466  func (oc *OrderedCache) DoRange(f func(k, v interface{}) bool, from, to interface{}) bool {
   467  	return oc.DoRangeEntry(func(e *Entry) bool {
   468  		return f(e.Key, e.Value)
   469  	}, from, to)
   470  }
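
        // For example, visiting every entry whose key falls between
        // bytesKey("a") and bytesKey("c") (sketch, reusing the illustrative
        // bytesKey type and the oc cache from the sketches above):
        //
        //   oc.DoRange(func(k, v interface{}) bool {
        //     fmt.Printf("%s=%v\n", k, v)
        //     return false // keep iterating
        //   }, bytesKey("a"), bytesKey("c"))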
   471  
   472  // IntervalCache is a cache which supports querying of intervals which
   473  // match a key or range of keys. It is backed by an interval tree. See
   474  // comments in UnorderedCache for more details on cache functionality.
   475  //
   476  // Note that the IntervalCache allows multiple identical segments, as
   477  // specified by start and end keys.
   478  //
   479  // Keys supplied to the IntervalCache's Get, Add & Del methods must be
   480  // constructed from IntervalCache.NewKey().
   481  //
   482  // IntervalCache is not safe for concurrent access.
   483  type IntervalCache struct {
   484  	baseCache
   485  	tree interval.Tree
   486  
   487  	// The fields below are used to avoid allocations during get, del and
   488  	// GetOverlaps.
   489  	getID      uintptr
   490  	getEntry   *Entry
   491  	overlapKey IntervalKey
   492  	overlaps   []*Entry
   493  }
   494  
   495  // IntervalKey provides uniqueness as well as key interval.
   496  type IntervalKey struct {
   497  	interval.Range
   498  	id uintptr
   499  }
   500  
   501  var intervalAlloc int64
   502  
   503  func (ik IntervalKey) String() string {
   504  	return fmt.Sprintf("%d: %q-%q", ik.id, ik.Start, ik.End)
   505  }
   506  
   507  // NewIntervalCache creates a new Cache backed by an interval tree.
   508  // See NewUnorderedCache() for details on parameters.
   509  func NewIntervalCache(config Config) *IntervalCache {
   510  	ic := &IntervalCache{
   511  		baseCache: newBaseCache(config),
   512  	}
   513  	ic.baseCache.init(ic)
   514  	return ic
   515  }
   516  
   517  // NewKey creates a new interval key defined by start and end values.
   518  func (ic *IntervalCache) NewKey(start, end []byte) *IntervalKey {
   519  	k := ic.MakeKey(start, end)
   520  	return &k
   521  }
   522  
   523  // MakeKey creates a new interval key defined by start and end values.
   524  func (ic *IntervalCache) MakeKey(start, end []byte) IntervalKey {
   525  	if bytes.Compare(start, end) >= 0 {
   526  		panic(fmt.Sprintf("start key greater than or equal to end key %q >= %q", start, end))
   527  	}
   528  	return IntervalKey{
   529  		Range: interval.Range{
   530  			Start: interval.Comparable(start),
   531  			End:   interval.Comparable(end),
   532  		},
   533  		id: uintptr(atomic.AddInt64(&intervalAlloc, 1)),
   534  	}
   535  }
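
        // Interval keys must satisfy Start < End, and the same *IntervalKey
        // must be passed to later Get and Del calls, since keys are matched by
        // their unique id. For example (sketch):
        //
        //   ic := NewIntervalCache(Config{Policy: CacheLRU})
        //   k := ic.NewKey([]byte("a"), []byte("c"))
        //   ic.Add(k, "value")
        //   if v, ok := ic.Get(k); ok {
        //     _ = v // "value"
        //   }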
   536  
   537  // Implementation of cacheStore interface.
   538  func (ic *IntervalCache) init() {
   539  	ic.tree = interval.NewTree(interval.ExclusiveOverlapper)
   540  }
   541  
   542  func (ic *IntervalCache) get(key interface{}) *Entry {
   543  	ik := key.(*IntervalKey)
   544  	ic.getID = ik.id
   545  	ic.tree.DoMatching(ic.doGet, ik.Range)
   546  	e := ic.getEntry
   547  	ic.getEntry = nil
   548  	return e
   549  }
   550  
   551  func (ic *IntervalCache) doGet(i interval.Interface) bool {
   552  	e := i.(*Entry)
   553  	if e.ID() == ic.getID {
   554  		ic.getEntry = e
   555  		return true
   556  	}
   557  	return false
   558  }
   559  
   560  func (ic *IntervalCache) add(e *Entry) {
   561  	if err := ic.tree.Insert(e, false); err != nil {
   562  		log.Errorf(context.TODO(), "%v", err)
   563  	}
   564  }
   565  
   566  func (ic *IntervalCache) del(e *Entry) {
   567  	if err := ic.tree.Delete(e, false); err != nil {
   568  		log.Errorf(context.TODO(), "%v", err)
   569  	}
   570  }
   571  
   572  func (ic *IntervalCache) length() int {
   573  	return ic.tree.Len()
   574  }
   575  
   576  // GetOverlaps returns a slice of entries which overlap the specified
   577  // interval. The slice is only valid until the next call to GetOverlaps.
   578  func (ic *IntervalCache) GetOverlaps(start, end []byte) []*Entry {
   579  	ic.overlapKey.Range = interval.Range{
   580  		Start: interval.Comparable(start),
   581  		End:   interval.Comparable(end),
   582  	}
   583  	ic.tree.DoMatching(ic.doOverlaps, ic.overlapKey.Range)
   584  	overlaps := ic.overlaps
   585  	ic.overlaps = ic.overlaps[:0]
   586  	return overlaps
   587  }
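
        // For example (sketch, continuing with the ic cache from the sketch
        // above): because the returned slice aliases an internal buffer that
        // is reused by the next call, callers that need to retain the results
        // should copy them out.
        //
        //   for _, e := range ic.GetOverlaps([]byte("b"), []byte("d")) {
        //     key := e.Key.(*IntervalKey)
        //     _, _ = key, e.Value
        //   }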
   588  
   589  func (ic *IntervalCache) doOverlaps(i interval.Interface) bool {
   590  	e := i.(*Entry)
   591  	ic.access(e) // maintain cache eviction ordering
   592  	ic.overlaps = append(ic.overlaps, e)
   593  	return false
   594  }