github.com/cockroachdb/cockroachdb-parser@v0.23.3-0.20240213214944-911057d40c9a/pkg/util/cache/cache.go

// Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
//
// This code is based on: https://github.com/golang/groupcache/

package cache

import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/biogo/store/llrb"
	"github.com/cockroachdb/cockroachdb-parser/pkg/util/interval"
)

// EvictionPolicy is the cache eviction policy enum.
type EvictionPolicy int

// Constants describing the LRU, FIFO, and None cache eviction policies,
// respectively.
const (
	CacheLRU  EvictionPolicy = iota // Least recently used
	CacheFIFO                       // First in, first out
	CacheNone                       // No evictions; don't maintain ordering list
)

// A Config specifies the eviction policy, eviction
// trigger callback, and eviction listener callback.
type Config struct {
	// Policy is one of the consts listed for EvictionPolicy.
	Policy EvictionPolicy

	// ShouldEvict is a callback function executed each time a new entry
	// is added to the cache. It supplies the cache size and the potential
	// evictee's key and value. The function should return true if the
	// entry may be evicted; false otherwise. For example, to support a
	// maximum size for the cache, use a method like:
	//
	//   func(size int, key, value interface{}) bool { return size > maxSize }
	//
	// To support a max TTL in the cache, use something like:
	//
	//   func(size int, key, value interface{}) bool {
	//     return timeutil.Now().UnixNano() - value.(int64) > maxTTLNanos
	//   }
	ShouldEvict func(size int, key, value interface{}) bool

	// OnEvicted optionally specifies a callback function to be
	// executed when an entry is purged from the cache.
	OnEvicted func(key, value interface{})

	// OnEvictedEntry optionally specifies a callback function to
	// be executed with the entry itself when it is purged from the cache.
	OnEvictedEntry func(entry *Entry)
}
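
// What follows is an illustrative sketch (not part of the original file) of a
// ShouldEvict callback of the kind described above: it bounds the cache to a
// maximum number of entries. The maxEntries value is a hypothetical constant
// chosen only for illustration.
var exampleShouldEvictBySize = func(size int, key, value interface{}) bool {
	const maxEntries = 128 // hypothetical capacity
	return size > maxEntries
}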

// Entry holds the key and value and the list links
// which define the eviction ordering.
type Entry struct {
	Key, Value interface{}
	next, prev *Entry
}

// Object pool used for short-lived Entry objects.
var entryPool = sync.Pool{
	New: func() interface{} { return &Entry{} },
}

func newEntry(key interface{}) *Entry {
	e := entryPool.Get().(*Entry)
	e.Key = key
	return e
}

func (e *Entry) release() {
	*e = Entry{}
	entryPool.Put(e)
}

func (e Entry) String() string {
	return fmt.Sprintf("%s", e.Key)
}

// Compare implements the llrb.Comparable interface for cache entries.
// This facility is used by the OrderedCache, and crucially requires
// that keys used with that cache implement llrb.Comparable.
func (e *Entry) Compare(b llrb.Comparable) int {
	return e.Key.(llrb.Comparable).Compare(b.(*Entry).Key.(llrb.Comparable))
}

// The following methods implement the interval.Interface for entry by casting
// the entry key to an interval key and calling the appropriate accessors.

// ID implements interval.Interface
func (e *Entry) ID() uintptr {
	return e.Key.(*IntervalKey).id
}

// Range implements interval.Interface
func (e *Entry) Range() interval.Range {
	return e.Key.(*IntervalKey).Range
}

// entryList is a doubly-linked circular list of *Entry elements. The code is
// derived from the stdlib container/list but customized to Entry in order to
// avoid a separate allocation for every element.
type entryList struct {
	root Entry
}

func (l *entryList) init() {
	l.root.next = &l.root
	l.root.prev = &l.root
}

func (l *entryList) back() *Entry {
	return l.root.prev
}

func (l *entryList) insertAfter(e, at *Entry) {
	n := at.next
	at.next = e
	e.prev = at
	e.next = n
	n.prev = e
}

func (l *entryList) insertBefore(e, mark *Entry) {
	l.insertAfter(e, mark.prev)
}

func (l *entryList) remove(e *Entry) *Entry {
	if e == &l.root {
		panic("cannot remove root list node")
	}
	// TODO(peter): Revert this protection against removing a non-existent entry
	// from the list when the cause of
	// https://github.com/cockroachdb/cockroachdb-parser/issues/6190 is determined. Should
	// be replaced with an explicit panic instead of the implicit one of a
	// nil-pointer dereference.
	if e.next != nil {
		e.prev.next = e.next
		e.next.prev = e.prev
		e.next = nil // avoid memory leaks
		e.prev = nil // avoid memory leaks
	}
	return e
}

func (l *entryList) pushFront(e *Entry) {
	l.insertAfter(e, &l.root)
}

func (l *entryList) moveToFront(e *Entry) {
	if l.root.next == e {
		return
	}
	l.insertAfter(l.remove(e), &l.root)
}
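
// exampleEvictionOrder is an illustrative sketch (not part of the original
// file) of the ordering invariant the caches below rely on: new entries are
// pushed onto the front of the list, and the eviction candidate is always
// taken from the back. The "first"/"second" keys are hypothetical.
func exampleEvictionOrder() *Entry {
	var l entryList
	l.init()
	l.pushFront(&Entry{Key: "first"})  // oldest entry; ends up at the back
	l.pushFront(&Entry{Key: "second"}) // newest entry; stays at the front
	return l.back() // the entry with Key "first", i.e. the eviction candidate
}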

// cacheStore is an interface for the backing store used for the cache.
type cacheStore interface {
	// init initializes or clears all entries.
	init()
	// get returns the entry by key.
	get(key interface{}) *Entry
	// add stores an entry.
	add(e *Entry)
	// del removes an entry.
	del(e *Entry)
	// length returns the number of items in the store.
	length() int
}

// baseCache contains the config, cacheStore interface, and the linked
// list for eviction order.
type baseCache struct {
	Config
	store cacheStore
	ll    entryList
}

func newBaseCache(config Config) baseCache {
	return baseCache{
		Config: config,
	}
}

// init initializes the baseCache with the provided cacheStore. It must be
// called with a non-nil cacheStore before use of the cache.
func (bc *baseCache) init(store cacheStore) {
	bc.ll.init()
	bc.store = store
	bc.store.init()
}

// Add adds a value to the cache.
func (bc *baseCache) Add(key, value interface{}) {
	bc.add(key, value, nil, nil)
}

// AddEntry adds a value to the cache. It provides an alternative interface to
// Add which the caller can use to reduce allocations by bundling the Entry
// structure with the key and value to be stored.
func (bc *baseCache) AddEntry(entry *Entry) {
	bc.add(entry.Key, entry.Value, entry, nil)
}

// AddEntryAfter adds a value to the cache, making sure that it is placed
// directly after the given entry in the eviction queue. It provides an
// alternative interface to Add which the caller can use to reduce allocations
// by bundling the Entry structure with the key and value to be stored.
func (bc *baseCache) AddEntryAfter(entry, after *Entry) {
	bc.add(entry.Key, entry.Value, entry, after)
}

// MoveToEnd moves the entry to the end of the eviction queue, so that it
// will be evicted last.
func (bc *baseCache) MoveToEnd(entry *Entry) {
	bc.ll.moveToFront(entry)
}

func (bc *baseCache) add(key, value interface{}, entry, after *Entry) {
	if e := bc.store.get(key); e != nil {
		bc.access(e)
		e.Value = value
		return
	}
	e := entry
	if e == nil {
		e = &Entry{Key: key, Value: value}
	}
	if after != nil {
		bc.ll.insertBefore(e, after)
	} else {
		bc.ll.pushFront(e)
	}
	bc.store.add(e)
	// Evict as many elements as we can.
	for bc.evict() {
	}
}

// Get looks up a key's value from the cache.
func (bc *baseCache) Get(key interface{}) (value interface{}, ok bool) {
	if e := bc.store.get(key); e != nil {
		bc.access(e)
		return e.Value, true
	}
	return nil, false
}

// StealthyGet looks up a key's value from the cache but does not consider it an
// "access" (with respect to the policy).
func (bc *baseCache) StealthyGet(key interface{}) (value interface{}, ok bool) {
	if e := bc.store.get(key); e != nil {
		return e.Value, true
	}
	return nil, false
}

// Del removes the provided key from the cache.
func (bc *baseCache) Del(key interface{}) {
	e := bc.store.get(key)
	bc.DelEntry(e)
}

// DelEntry removes the provided entry from the cache.
func (bc *baseCache) DelEntry(entry *Entry) {
	if entry != nil {
		bc.removeElement(entry)
	}
}

// Clear clears all entries from the cache.
func (bc *baseCache) Clear() {
	if bc.OnEvicted != nil || bc.OnEvictedEntry != nil {
		for e := bc.ll.back(); e != &bc.ll.root; e = e.prev {
			if bc.OnEvicted != nil {
				bc.OnEvicted(e.Key, e.Value)
			}
			if bc.OnEvictedEntry != nil {
				bc.OnEvictedEntry(e)
			}
		}
	}
	bc.ll.init()
	bc.store.init()
}

// Len returns the number of items in the cache.
func (bc *baseCache) Len() int {
	return bc.store.length()
}

// Do iterates over all entries in the cache and calls fn with each entry.
func (bc *baseCache) Do(fn func(e *Entry)) {
	for e := bc.ll.root.next; e != &bc.ll.root; e = e.next {
		fn(e)
	}
}

func (bc *baseCache) access(e *Entry) {
	if bc.Policy == CacheLRU {
		bc.ll.moveToFront(e)
	}
}

func (bc *baseCache) removeElement(e *Entry) {
	bc.ll.remove(e)
	bc.store.del(e)
	if bc.OnEvicted != nil {
		bc.OnEvicted(e.Key, e.Value)
	}
	if bc.OnEvictedEntry != nil {
		bc.OnEvictedEntry(e)
	}
}

// evict removes the oldest item from the cache for FIFO and
// the least recently used item for LRU. Returns true if an
// entry was evicted, false otherwise.
func (bc *baseCache) evict() bool {
	if bc.ShouldEvict == nil || bc.Policy == CacheNone {
		return false
	}
	l := bc.store.length()
	if l > 0 {
		e := bc.ll.back()
		if bc.ShouldEvict(l, e.Key, e.Value) {
			bc.removeElement(e)
			return true
		}
	}
	return false
}

// UnorderedCache is a cache which supports custom eviction triggers and two
// eviction policies: LRU and FIFO. A listener pattern is available
// for eviction events. This cache uses a hashmap for storing elements,
// making it the most performant of the caches in this package. Only exact
// lookups are supported.
//
// UnorderedCache requires that keys are comparable, according to the go
// specification (http://golang.org/ref/spec#Comparison_operators).
//
// UnorderedCache is not safe for concurrent access.
type UnorderedCache struct {
	baseCache
	hmap map[interface{}]interface{}
}

// NewUnorderedCache creates a new UnorderedCache backed by a hash map.
func NewUnorderedCache(config Config) *UnorderedCache {
	mc := &UnorderedCache{
		baseCache: newBaseCache(config),
	}
	mc.baseCache.init(mc)
	return mc
}
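
// exampleUnorderedCacheUsage is an illustrative sketch (not part of the
// original file) showing how UnorderedCache might be used as a size-bounded
// LRU cache. The maxEntries bound and the string keys are hypothetical.
func exampleUnorderedCacheUsage() (interface{}, bool) {
	const maxEntries = 2
	c := NewUnorderedCache(Config{
		Policy: CacheLRU,
		ShouldEvict: func(size int, key, value interface{}) bool {
			return size > maxEntries
		},
	})
	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3)     // triggers eviction of "a", the least recently used entry
	return c.Get("a") // expected to return (nil, false)
}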

// Implementation of cacheStore interface.
func (mc *UnorderedCache) init() {
	mc.hmap = make(map[interface{}]interface{})
}
func (mc *UnorderedCache) get(key interface{}) *Entry {
	if e, ok := mc.hmap[key].(*Entry); ok {
		return e
	}
	return nil
}
func (mc *UnorderedCache) add(e *Entry) {
	mc.hmap[e.Key] = e
}
func (mc *UnorderedCache) del(e *Entry) {
	delete(mc.hmap, e.Key)
}
func (mc *UnorderedCache) length() int {
	return len(mc.hmap)
}

// OrderedCache is a cache which supports binary searches using Ceil
// and Floor methods. It is backed by a left-leaning red black tree.
// See comments in UnorderedCache for more details on cache functionality.
//
// OrderedCache requires that keys implement llrb.Comparable.
//
// OrderedCache is not safe for concurrent access.
type OrderedCache struct {
	baseCache
	llrb llrb.Tree
}

// NewOrderedCache creates a new Cache backed by a left-leaning red
// black binary tree which supports binary searches via the Ceil() and
// Floor() methods. See NewUnorderedCache() for details on parameters.
func NewOrderedCache(config Config) *OrderedCache {
	oc := &OrderedCache{
		baseCache: newBaseCache(config),
	}
	oc.baseCache.init(oc)
	return oc
}

// Implementation of cacheStore interface.
func (oc *OrderedCache) init() {
	oc.llrb = llrb.Tree{}
}
func (oc *OrderedCache) get(key interface{}) *Entry {
	eKey := newEntry(key)
	defer eKey.release()
	if e, ok := oc.llrb.Get(eKey).(*Entry); ok {
		return e
	}
	return nil
}
func (oc *OrderedCache) add(e *Entry) {
	oc.llrb.Insert(e)
}
func (oc *OrderedCache) del(e *Entry) {
	oc.llrb.Delete(e)
}
func (oc *OrderedCache) length() int {
	return oc.llrb.Len()
}

// CeilEntry returns the smallest cache entry greater than or equal to key.
func (oc *OrderedCache) CeilEntry(key interface{}) (*Entry, bool) {
	eKey := newEntry(key)
	defer eKey.release()
	if e, ok := oc.llrb.Ceil(eKey).(*Entry); ok {
		return e, true
	}
	return nil, false
}

// Ceil returns the smallest key-value pair greater than or equal to key.
func (oc *OrderedCache) Ceil(key interface{}) (interface{}, interface{}, bool) {
	if e, ok := oc.CeilEntry(key); ok {
		return e.Key, e.Value, true
	}
	return nil, nil, false
}

// FloorEntry returns the greatest cache entry less than or equal to key.
func (oc *OrderedCache) FloorEntry(key interface{}) (*Entry, bool) {
	eKey := newEntry(key)
	defer eKey.release()
	if e, ok := oc.llrb.Floor(eKey).(*Entry); ok {
		return e, true
	}
	return nil, false
}

// Floor returns the greatest key-value pair less than or equal to key.
func (oc *OrderedCache) Floor(key interface{}) (interface{}, interface{}, bool) {
	if e, ok := oc.FloorEntry(key); ok {
		return e.Key, e.Value, true
	}
	return nil, nil, false
}
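
// exampleOrderedKey is an illustrative key type (not part of the original
// file) implementing llrb.Comparable, as OrderedCache requires of its keys.
type exampleOrderedKey int

// Compare implements llrb.Comparable for exampleOrderedKey.
func (k exampleOrderedKey) Compare(b llrb.Comparable) int {
	switch o := b.(exampleOrderedKey); {
	case k < o:
		return -1
	case k > o:
		return 1
	default:
		return 0
	}
}

// exampleOrderedCacheFloor is an illustrative sketch (not part of the original
// file) of a Floor lookup; the keys and values are hypothetical.
func exampleOrderedCacheFloor() (interface{}, bool) {
	oc := NewOrderedCache(Config{Policy: CacheNone})
	oc.Add(exampleOrderedKey(1), "one")
	oc.Add(exampleOrderedKey(5), "five")
	_, v, ok := oc.Floor(exampleOrderedKey(3)) // greatest key <= 3: expected "one"
	return v, ok
}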

// DoEntry invokes f on all cache entries in the cache. f returns a boolean
// indicating whether the traversal is done. If f returns true, the DoEntry
// loop exits; if it returns false, the loop continues. DoEntry returns whether
// the iteration exited early.
func (oc *OrderedCache) DoEntry(f func(e *Entry) bool) bool {
	return oc.llrb.Do(func(e llrb.Comparable) bool {
		return f(e.(*Entry))
	})
}

// Do invokes f on all key-value pairs in the cache. f returns a boolean
// indicating whether the traversal is done. If f returns true, the Do loop
// exits; if it returns false, the loop continues. Do returns whether the
// iteration exited early.
func (oc *OrderedCache) Do(f func(k, v interface{}) bool) bool {
	return oc.DoEntry(func(e *Entry) bool {
		return f(e.Key, e.Value)
	})
}

// DoRangeEntry invokes f on all cache entries in the range of from -> to. f
// returns a boolean indicating whether the traversal is done. If f returns
// true, the DoRangeEntry loop exits; if it returns false, the loop continues.
// DoRangeEntry returns whether the iteration exited early.
func (oc *OrderedCache) DoRangeEntry(f func(e *Entry) bool, from, to interface{}) bool {
	eFrom := newEntry(from)
	eTo := newEntry(to)
	defer eFrom.release()
	defer eTo.release()
	return oc.llrb.DoRange(func(e llrb.Comparable) bool {
		return f(e.(*Entry))
	}, eFrom, eTo)
}

// DoRangeReverseEntry invokes f on all cache entries in the range (to, from];
// from should be higher than to. f returns a boolean indicating whether the
// traversal is done. If f returns true, the DoRangeReverseEntry loop exits;
// if it returns false, the loop continues. DoRangeReverseEntry returns whether
// the iteration exited early.
func (oc *OrderedCache) DoRangeReverseEntry(f func(e *Entry) bool, from, to interface{}) bool {
	eFrom := newEntry(from)
	eTo := newEntry(to)
	defer eFrom.release()
	defer eTo.release()
	return oc.llrb.DoRangeReverse(func(e llrb.Comparable) bool {
		return f(e.(*Entry))
	}, eFrom, eTo)
}

// DoRange invokes f on all key-value pairs in the range of from -> to. f
// returns a boolean indicating whether the traversal is done. If f returns
// true, the DoRange loop exits; if it returns false, the loop continues.
// DoRange returns whether the iteration exited early.
func (oc *OrderedCache) DoRange(f func(k, v interface{}) bool, from, to interface{}) bool {
	return oc.DoRangeEntry(func(e *Entry) bool {
		return f(e.Key, e.Value)
	}, from, to)
}

// IntervalCache is a cache which supports querying of intervals which
// match a key or range of keys. It is backed by an interval tree. See
// comments in UnorderedCache for more details on cache functionality.
//
// Note that the IntervalCache allows multiple identical segments, as
// specified by start and end keys.
//
// Keys supplied to the IntervalCache's Get, Add & Del methods must be
// constructed from IntervalCache.NewKey().
//
// IntervalCache is not safe for concurrent access.
type IntervalCache struct {
	baseCache
	tree      interval.Tree
	logErrorf IntervalCacheLogErrorf

	// The fields below are used to avoid allocations during get, del and
	// GetOverlaps.
	getID      uintptr
	getEntry   *Entry
	overlapKey IntervalKey
	overlaps   []*Entry
}

// IntervalCacheLogErrorf is a hook that is called on certain errors in the IntervalCache.
// This is used to prevent an import of util/log.
type IntervalCacheLogErrorf func(ctx context.Context, format string, args ...interface{})

// IntervalKey provides uniqueness as well as key interval.
type IntervalKey struct {
	interval.Range
	id uintptr
}

var intervalAlloc int64

func (ik IntervalKey) String() string {
	return fmt.Sprintf("%d: %q-%q", ik.id, ik.Start, ik.End)
}

// NewIntervalCache creates a new Cache backed by an interval tree.
// See NewUnorderedCache() for details on parameters.
func NewIntervalCache(config Config, logErrorf IntervalCacheLogErrorf) *IntervalCache {
	ic := &IntervalCache{
		baseCache: newBaseCache(config),
		logErrorf: logErrorf,
	}
	ic.baseCache.init(ic)
	return ic
}

// NewKey creates a new interval key defined by start and end values.
func (ic *IntervalCache) NewKey(start, end []byte) *IntervalKey {
	k := ic.MakeKey(start, end)
	return &k
}

// MakeKey creates a new interval key defined by start and end values.
func (ic *IntervalCache) MakeKey(start, end []byte) IntervalKey {
	if bytes.Compare(start, end) >= 0 {
		panic(fmt.Sprintf("start key greater than or equal to end key %q >= %q", start, end))
	}
	return IntervalKey{
		Range: interval.Range{
			Start: interval.Comparable(start),
			End:   interval.Comparable(end),
		},
		id: uintptr(atomic.AddInt64(&intervalAlloc, 1)),
	}
}

// Implementation of cacheStore interface.
func (ic *IntervalCache) init() {
	ic.tree = interval.NewTree(interval.ExclusiveOverlapper)
}

func (ic *IntervalCache) get(key interface{}) *Entry {
	ik := key.(*IntervalKey)
	ic.getID = ik.id
	ic.tree.DoMatching(ic.doGet, ik.Range)
	e := ic.getEntry
	ic.getEntry = nil
	return e
}

func (ic *IntervalCache) doGet(i interval.Interface) bool {
	e := i.(*Entry)
	if e.ID() == ic.getID {
		ic.getEntry = e
		return true
	}
	return false
}

func (ic *IntervalCache) add(e *Entry) {
	if err := ic.tree.Insert(e, false); err != nil {
		ic.logErrorf(context.TODO(), "%v", err)
	}
}

func (ic *IntervalCache) del(e *Entry) {
	if err := ic.tree.Delete(e, false); err != nil {
		ic.logErrorf(context.TODO(), "%v", err)
	}
}

func (ic *IntervalCache) length() int {
	return ic.tree.Len()
}

// GetOverlaps returns a slice of values which overlap the specified
// interval. The slice is only valid until the next call to GetOverlaps.
func (ic *IntervalCache) GetOverlaps(start, end []byte) []*Entry {
	ic.overlapKey.Range = interval.Range{
		Start: interval.Comparable(start),
		End:   interval.Comparable(end),
	}
	ic.tree.DoMatching(ic.doOverlaps, ic.overlapKey.Range)
	overlaps := ic.overlaps
	ic.overlaps = ic.overlaps[:0]
	return overlaps
}

func (ic *IntervalCache) doOverlaps(i interval.Interface) bool {
	e := i.(*Entry)
	ic.access(e) // maintain cache eviction ordering
	ic.overlaps = append(ic.overlaps, e)
	return false
}
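
// exampleIntervalCacheOverlaps is an illustrative sketch (not part of the
// original file) of the IntervalCache flow: keys are constructed with NewKey
// and overlapping entries are retrieved with GetOverlaps. The byte keys and
// the no-op error logger are hypothetical.
func exampleIntervalCacheOverlaps() []*Entry {
	noopLogErrorf := func(ctx context.Context, format string, args ...interface{}) {}
	ic := NewIntervalCache(Config{Policy: CacheNone}, noopLogErrorf)
	ic.Add(ic.NewKey([]byte("a"), []byte("c")), "value spanning [a, c)")
	// [b, d) overlaps [a, c), so the entry added above is expected to be
	// returned. The slice is only valid until the next call to GetOverlaps.
	return ic.GetOverlaps([]byte("b"), []byte("d"))
}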