github.com/decred/dcrlnd@v0.7.6/neutrinocache/lru/lru.go

package lru

import (
	"container/list"
	"fmt"
	"sync"

	"github.com/decred/dcrlnd/neutrinocache"
)

// elementMap is a map from a generic interface key to its list.Element in the
// cache's list.
type elementMap map[interface{}]*list.Element

// entry represents a (key,value) pair entry in the Cache. The Cache's list
// stores entries which let us get the cache key when an entry is evicted.
type entry struct {
	key   interface{}
	value cache.Value
}

// Cache provides a generic thread-safe LRU cache that can be used for
// storing filters, blocks, etc.
type Cache struct {
	// capacity represents how much this cache can hold. It could be a
	// number of elements or a number of bytes, as determined by the
	// cache.Value's Size method.
	capacity uint64

	// size represents the size of all the elements currently in the cache.
	size uint64

	// ll is a doubly linked list which keeps track of how recently
	// elements were used by moving them to the front when accessed.
	ll *list.List

	// cache is a generic map which allows us to find an element's position
	// in the ll list from a given key.
	cache elementMap

	// mtx is used to make sure the Cache is thread-safe.
	mtx sync.RWMutex
}
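
// Because the meaning of "size" is delegated entirely to cache.Value's Size
// method, the same Cache can enforce either kind of budget: if every stored
// Value reports a Size of 1, capacity acts as a maximum element count, while
// a Size reported in bytes makes capacity a byte budget.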

// NewCache returns a cache with the specified capacity; the cache's size can
// never exceed that given capacity.
func NewCache(capacity uint64) *Cache {
	return &Cache{
		capacity: capacity,
		ll:       list.New(),
		cache:    make(elementMap),
	}
}

// evict will evict as many elements as necessary to make enough space for a
// new element of the given size to be inserted.
func (c *Cache) evict(needed uint64) (bool, error) {
	if needed > c.capacity {
		return false, fmt.Errorf("can't evict %v elements in size, "+
			"since capacity is %v", needed, c.capacity)
	}

	evicted := false
	for c.capacity-c.size < needed {
		// We still need to evict some more elements.
		if c.ll.Len() == 0 {
			// We should never reach here.
			return false, fmt.Errorf("all elements got evicted, "+
				"yet still need to evict %v, likelihood of "+
				"error during size calculation",
				needed-(c.capacity-c.size))
		}

		// Find the least recently used item.
		if elr := c.ll.Back(); elr != nil {
			// Determine the lru item's size.
			ce := elr.Value.(*entry)
			es, err := ce.value.Size()
			if err != nil {
				return false, fmt.Errorf("couldn't determine "+
					"size of existing cache value %v", err)
			}

			// Account for that element's removal in evicted and
			// cache size.
			c.size -= es

			// Remove the element from the cache.
			c.ll.Remove(elr)
			delete(c.cache, ce.key)
			evicted = true
		}
	}

	return evicted, nil
}
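
// As a concrete illustration of the loop above: with capacity 10 and size 9,
// a call to evict(4) keeps removing the element at the back of ll (the least
// recently used one) and subtracting its reported Size from c.size until at
// least 4 units are free, returning an error only if the list empties before
// enough space has been reclaimed.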

// Put inserts a given (key,value) pair into the cache. If the key already
// exists, it will replace the value and update it to be the most recent item
// in the cache. The return value indicates whether items had to be evicted to
// make room for the new element.
func (c *Cache) Put(key interface{}, value cache.Value) (bool, error) {
	vs, err := value.Size()
	if err != nil {
		return false, fmt.Errorf("couldn't determine size of cache "+
			"value: %v", err)
	}

	if vs > c.capacity {
		return false, fmt.Errorf("can't insert entry of size %v into "+
			"cache with capacity %v", vs, c.capacity)
	}

	c.mtx.Lock()
	defer c.mtx.Unlock()

	// If the element already exists, remove it and decrease the cache's
	// size.
	el, ok := c.cache[key]
	if ok {
		es, err := el.Value.(*entry).value.Size()
		if err != nil {
			return false, fmt.Errorf("couldn't determine size of "+
				"existing cache value %v", err)
		}
		c.ll.Remove(el)
		c.size -= es
	}

	// Then we need to make sure we have enough space for the element,
	// evicting the least recently used elements if we need more space.
	evicted, err := c.evict(vs)
	if err != nil {
		return false, err
	}

	// We have made enough space in the cache, so just insert it.
	el = c.ll.PushFront(&entry{key, value})
	c.cache[key] = el
	c.size += vs

	return evicted, nil
}

// Get will return the value for a given key, making the element the most
// recently accessed item in the process. It returns a nil value and
// cache.ErrElementNotFound if the key isn't found.
func (c *Cache) Get(key interface{}) (cache.Value, error) {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	el, ok := c.cache[key]
	if !ok {
		// Element not found in the cache.
		return nil, cache.ErrElementNotFound
	}

	// When the cache needs to evict an element to make space for another
	// one, it starts eviction from the back, so by moving this element to
	// the front, its eviction is delayed because it was recently accessed.
	c.ll.MoveToFront(el)
	return el.Value.(*entry).value, nil
}

// Len returns the number of elements in the cache.
func (c *Cache) Len() int {
	c.mtx.RLock()
	defer c.mtx.RUnlock()

	return c.ll.Len()
}
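
// The code below is an illustrative sketch rather than part of the original
// file: it shows one way the Cache could be exercised, assuming the imported
// neutrinocache package exposes a Value interface whose single method is
// Size() (uint64, error), which is what the calls above rely on. The
// sizedItem type and exampleUsage function are hypothetical names used only
// for this example.

// sizedItem is a minimal cache.Value implementation that simply reports a
// fixed size.
type sizedItem struct {
	size uint64
}

// Size returns the item's fixed size and never fails.
func (s *sizedItem) Size() (uint64, error) {
	return s.size, nil
}

// exampleUsage walks through inserts, a lookup, and an eviction against a
// cache with a capacity of 2 units.
func exampleUsage() {
	c := NewCache(2)

	// Two items of size 1 fill the cache exactly.
	_, _ = c.Put("a", &sizedItem{size: 1})
	_, _ = c.Put("b", &sizedItem{size: 1})

	// Accessing "a" moves it to the front, leaving "b" as the least
	// recently used element.
	_, _ = c.Get("a")

	// Inserting "c" needs 1 more unit of space, so "b" is evicted from the
	// back of the list and evicted is reported as true.
	evicted, _ := c.Put("c", &sizedItem{size: 1})
	_ = evicted

	// The cache now holds 2 elements: "a" and "c".
	_ = c.Len()
}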