go.temporal.io/server@v1.23.0/common/cache/lru.go

// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc.  All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package cache

import (
	"container/list"
	"sync"
	"time"

	enumspb "go.temporal.io/api/enums/v1"
	"go.temporal.io/api/serviceerror"
	"go.temporal.io/server/common/clock"
)

var (
	// ErrCacheFull is returned if Put fails because the cache capacity is fully occupied by pinned elements
	ErrCacheFull = serviceerror.NewResourceExhausted(
		enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED,
		"cache capacity is fully occupied with pinned elements",
	)
	// ErrCacheItemTooLarge is returned if Put fails because the item is larger than the max cache capacity
	ErrCacheItemTooLarge = serviceerror.NewInternal("cache item size is larger than max cache capacity")
)

const emptyEntrySize = 0

// lru is a concurrent fixed size cache that evicts elements in lru order
type (
	lru struct {
		mut        sync.Mutex
		byAccess   *list.List
		byKey      map[interface{}]*list.Element
		maxSize    int
		currSize   int
		ttl        time.Duration
		pin        bool
		timeSource clock.TimeSource
	}

	iteratorImpl struct {
		lru        *lru
		createTime time.Time
		nextItem   *list.Element
	}

	entryImpl struct {
		key        interface{}
		createTime time.Time
		value      interface{}
		refCount   int
		size       int
	}
)

// Close closes the iterator
func (it *iteratorImpl) Close() {
	it.lru.mut.Unlock()
}

// HasNext returns true if there are more items to be returned
func (it *iteratorImpl) HasNext() bool {
	return it.nextItem != nil
}

// Next returns the next item
func (it *iteratorImpl) Next() Entry {
	if it.nextItem == nil {
		panic("LRU cache iterator Next called when there is no next item")
	}

	entry := it.nextItem.Value.(*entryImpl)
	it.nextItem = it.nextItem.Next()
	// make a copy of the entry so there will be no concurrent access to this entry
	entry = &entryImpl{
		key:        entry.key,
		value:      entry.value,
		size:       entry.size,
		createTime: entry.createTime,
	}
	it.prepareNext()
	return entry
}

func (it *iteratorImpl) prepareNext() {
	for it.nextItem != nil {
		entry := it.nextItem.Value.(*entryImpl)
		if it.lru.isEntryExpired(entry, it.createTime) {
			nextItem := it.nextItem.Next()
			it.lru.deleteInternal(it.nextItem)
			it.nextItem = nextItem
		} else {
			return
		}
	}
}

// Iterator returns an iterator over the cache. The cache does not use
// re-entrant locks, so accessing or modifying the cache during iteration
// can cause a deadlock.
func (c *lru) Iterator() Iterator {
	c.mut.Lock()
	iterator := &iteratorImpl{
		lru:        c,
		createTime: c.timeSource.Now().UTC(),
		nextItem:   c.byAccess.Front(),
	}
	iterator.prepareNext()
	return iterator
}
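
// exampleIterate is an illustrative sketch (the function name and the use of
// Entry.Key are assumptions, not an established part of this package) showing
// how a caller might walk the cache with the iterator above. Iterator holds the
// cache mutex, so the iterator must be closed, and the cache must not be read
// or modified, until the walk is finished.
func exampleIterate(c Cache) []interface{} {
	var keys []interface{}
	it := c.Iterator()
	defer it.Close() // releases the mutex taken by Iterator
	for it.HasNext() {
		entry := it.Next() // Next returns a copy of the entry, so it is safe to keep
		keys = append(keys, entry.Key())
	}
	return keys
}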

func (entry *entryImpl) Key() interface{} {
	return entry.key
}

func (entry *entryImpl) Value() interface{} {
	return entry.value
}

func (entry *entryImpl) Size() int {
	return entry.size
}

func (entry *entryImpl) CreateTime() time.Time {
	return entry.createTime
}

// New creates a new cache with the given options
func New(maxSize int, opts *Options) Cache {
	if opts == nil {
		opts = &Options{}
	}
	timeSource := opts.TimeSource
	if timeSource == nil {
		timeSource = clock.NewRealTimeSource()
	}

	return &lru{
		byAccess:   list.New(),
		byKey:      make(map[interface{}]*list.Element),
		ttl:        opts.TTL,
		maxSize:    maxSize,
		currSize:   0,
		pin:        opts.Pin,
		timeSource: timeSource,
	}
}

// NewLRU creates a new LRU cache of the given size, setting initial capacity
// to the max size
func NewLRU(maxSize int) Cache {
	return New(maxSize, nil)
}
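
// exampleBasicUsage is an illustrative sketch (the function name, capacity, TTL,
// and keys are assumptions) of constructing a small cache with the constructors
// above and exercising Get and Put on it.
func exampleBasicUsage() (interface{}, interface{}) {
	c := New(10, &Options{TTL: time.Minute}) // unpinned cache whose entries expire one minute after being written
	c.Put("key-1", "value-1")                // insert; returns the previous value, if any
	hit := c.Get("key-1")                    // "value-1"; the entry also moves to the front of the LRU list
	miss := c.Get("key-2")                   // nil: the key was never inserted
	return hit, miss
}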

// Get retrieves the value stored under the given key
func (c *lru) Get(key interface{}) interface{} {
	if c.maxSize == 0 {
		return nil
	}
	c.mut.Lock()
	defer c.mut.Unlock()

	element := c.byKey[key]
	if element == nil {
		return nil
	}

	entry := element.Value.(*entryImpl)

	if c.isEntryExpired(entry, c.timeSource.Now().UTC()) {
		// Entry has expired
		c.deleteInternal(element)
		return nil
	}

	if c.pin {
		entry.refCount++
	}
	c.byAccess.MoveToFront(element)
	return entry.value
}

// Put puts a new value associated with a given key, returning the existing value (if present)
func (c *lru) Put(key interface{}, value interface{}) interface{} {
	if c.pin {
		panic("Cannot use Put API in Pin mode. Use Delete and PutIfNotExist if necessary")
	}
	val, _ := c.putInternal(key, value, true)
	return val
}

// PutIfNotExist puts a value associated with a given key if it does not exist
func (c *lru) PutIfNotExist(key interface{}, value interface{}) (interface{}, error) {
	existing, err := c.putInternal(key, value, false)
	if err != nil {
		return nil, err
	}

	if existing == nil {
		// This is a new value
		return value, err
	}

	return existing, err
}
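
// examplePutSemantics is an illustrative sketch (the function name and values are
// assumptions) contrasting Put and PutIfNotExist on an unpinned cache: Put
// overwrites and returns the previous value, while PutIfNotExist keeps and
// returns the existing value.
func examplePutSemantics() (interface{}, interface{}, error) {
	c := NewLRU(2)
	prev := c.Put("k", "v1")                // nil: no previous value for "k"
	prev = c.Put("k", "v2")                 // "v1": overwritten, old value returned
	kept, err := c.PutIfNotExist("k", "v3") // "v2", nil: the existing value wins
	return prev, kept, err
}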

// Delete deletes the key-value pair associated with the given key
func (c *lru) Delete(key interface{}) {
	if c.maxSize == 0 {
		return
	}
	c.mut.Lock()
	defer c.mut.Unlock()

	element := c.byKey[key]
	if element != nil {
		c.deleteInternal(element)
	}
}

// Release decrements the ref count of a pinned element.
func (c *lru) Release(key interface{}) {
	if c.maxSize == 0 || !c.pin {
		return
	}
	c.mut.Lock()
	defer c.mut.Unlock()

	elt, ok := c.byKey[key]
	if !ok {
		return
	}
	entry := elt.Value.(*entryImpl)
	entry.refCount--
}
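
// examplePinnedUsage is an illustrative sketch (the function name and values are
// assumptions) of the pin-mode flow: PutIfNotExist and Get both increment the
// entry's ref count, so each successful call needs a matching Release before the
// entry becomes evictable again. Put would panic on a pinned cache.
func examplePinnedUsage() error {
	c := New(2, &Options{Pin: true})
	if _, err := c.PutIfNotExist("k", "v"); err != nil {
		return err // e.g. ErrCacheFull when capacity is held by other pinned entries
	}
	defer c.Release("k") // unpin the reference taken by PutIfNotExist
	_ = c.Get("k")       // pins the entry again
	defer c.Release("k") // so the Get needs its own Release
	return nil
}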

// Size returns the current size of the lru, which is useful when the cache is not full. The size is the
// sum of the sizes of all entries in the cache, and an entry's size is the size of its value: values that
// implement the Sizeable interface report their own size, while all other values count as size 1.
func (c *lru) Size() int {
	c.mut.Lock()
	defer c.mut.Unlock()

	return c.currSize
}
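
// exampleSizeAccounting is an illustrative sketch (the function name and values
// are assumptions) of the size accounting described above: values that do not
// report their own size each count as 1, so two plain string values give
// Size() == 2, while Sizeable values would contribute their reported sizes.
func exampleSizeAccounting() int {
	c := NewLRU(3)
	c.Put("a", "first")
	c.Put("b", "second")
	return c.Size()
}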

// putInternal puts a new value associated with a given key, returning the existing value (if present).
// The allowUpdate flag controls whether an existing, unexpired value is overwritten.
func (c *lru) putInternal(key interface{}, value interface{}, allowUpdate bool) (interface{}, error) {
	if c.maxSize == 0 {
		return nil, nil
	}
	newEntrySize := getSize(value)
	if newEntrySize > c.maxSize {
		return nil, ErrCacheItemTooLarge
	}

	c.mut.Lock()
	defer c.mut.Unlock()

	elt := c.byKey[key]
	// If the entry exists, check if it has expired or update the value
	if elt != nil {
		existingEntry := elt.Value.(*entryImpl)
		if !c.isEntryExpired(existingEntry, c.timeSource.Now().UTC()) {
			existingVal := existingEntry.value
			if allowUpdate {
				newCacheSize := c.calculateNewCacheSize(newEntrySize, existingEntry.Size())
				if newCacheSize > c.maxSize {
					c.tryEvictUntilEnoughSpaceWithSkipEntry(newEntrySize, existingEntry)
					// calculate again after eviction
					newCacheSize = c.calculateNewCacheSize(newEntrySize, existingEntry.Size())
					if newCacheSize > c.maxSize {
						// This should never happen since allowUpdate is always **true** for non-pinned cache,
						// and if all entries are not pinned(ref==0), then the cache should never be full as long as
						// new entry's size is less than max size.
						// However, to prevent any unexpected behavior, it checks the cache size again.
						return nil, ErrCacheFull
					}
				}
				existingEntry.value = value
				existingEntry.size = newEntrySize
				c.currSize = newCacheSize
				c.updateEntryTTL(existingEntry)
			}

			c.updateEntryRefCount(existingEntry)
			c.byAccess.MoveToFront(elt)
			return existingVal, nil
		}

		// Entry has expired
		c.deleteInternal(elt)
	}

	c.tryEvictUntilEnoughSpaceWithSkipEntry(newEntrySize, nil)

	// check if the new entry can fit in the cache
	newCacheSize := c.calculateNewCacheSize(newEntrySize, emptyEntrySize)
	if newCacheSize > c.maxSize {
		return nil, ErrCacheFull
	}

	entry := &entryImpl{
		key:   key,
		value: value,
		size:  newEntrySize,
	}

	c.updateEntryTTL(entry)
	c.updateEntryRefCount(entry)
	element := c.byAccess.PushFront(entry)
	c.byKey[key] = element
	c.currSize = newCacheSize
	return nil, nil
}

func (c *lru) calculateNewCacheSize(newEntrySize int, existingEntrySize int) int {
	return c.currSize - existingEntrySize + newEntrySize
}

func (c *lru) deleteInternal(element *list.Element) {
	entry := c.byAccess.Remove(element).(*entryImpl)
	c.currSize -= entry.Size()
	delete(c.byKey, entry.key)
}

// tryEvictUntilEnoughSpaceWithSkipEntry tries to evict entries until there is enough space for the new
// entry, without evicting the existing entry. The existing entry is skipped because it is being updated.
func (c *lru) tryEvictUntilEnoughSpaceWithSkipEntry(newEntrySize int, existingEntry *entryImpl) {
	element := c.byAccess.Back()
	existingEntrySize := 0
	if existingEntry != nil {
		existingEntrySize = existingEntry.Size()
	}

	for c.calculateNewCacheSize(newEntrySize, existingEntrySize) > c.maxSize && element != nil {
		entry := element.Value.(*entryImpl)
		if existingEntry != nil && entry.key == existingEntry.key {
			element = element.Prev()
			continue
		}
		element = c.tryEvictAndGetPreviousElement(entry, element)
	}
}

func (c *lru) tryEvictAndGetPreviousElement(entry *entryImpl, element *list.Element) *list.Element {
	if entry.refCount == 0 {
		elementPrev := element.Prev()
		// currSize will be updated within deleteInternal
		c.deleteInternal(element)
		return elementPrev
	}
	// entry.refCount > 0
	// skip, entry still being referenced
	return element.Prev()
}

func (c *lru) isEntryExpired(entry *entryImpl, currentTime time.Time) bool {
	return entry.refCount == 0 && !entry.createTime.IsZero() && currentTime.After(entry.createTime.Add(c.ttl))
}

func (c *lru) updateEntryTTL(entry *entryImpl) {
	if c.ttl != 0 {
		entry.createTime = c.timeSource.Now().UTC()
	}
}

func (c *lru) updateEntryRefCount(entry *entryImpl) {
	if c.pin {
		entry.refCount++
	}
}