github.com/klaytn/klaytn@v1.10.2/common/cache.go

// Copyright 2018 The klaytn Authors
// This file is part of the klaytn library.
//
// The klaytn library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The klaytn library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the klaytn library. If not, see <http://www.gnu.org/licenses/>.

package common

import (
	"errors"
	"math"

	lru "github.com/hashicorp/golang-lru"
	"github.com/klaytn/klaytn/log"
	"github.com/pbnjay/memory"
)
type CacheType int

const (
	LRUCacheType CacheType = iota
	LRUShardCacheType
	FIFOCacheType
	ARCCacheType
)

const (
	cacheLevelSaving  = "saving"
	cacheLevelNormal  = "normal"
	cacheLevelExtreme = "extreme"
)

const (
	minimumMemorySize      = 16
	defaultCacheUsageLevel = cacheLevelSaving
)

// DefaultCacheType is the default cache type. It is set by a command-line flag.
var DefaultCacheType CacheType = FIFOCacheType

var (
	logger                     = log.NewModuleLogger(log.Common)
	CacheScale             int = 100                     // Cache size = preset size * CacheScale / 100. Only used when IsScaled == true
	ScaleByCacheUsageLevel int = 100                     // Scale according to the cache usage level (%). Only used when IsScaled == true
	TotalPhysicalMemGB     int = getPhysicalMemorySize() // Total physical memory of the system in GB
)

// getPhysicalMemorySize returns the system's physical memory size in GB.
// It returns minimumMemorySize if the OS does not support the system call
// used to obtain it, or if the system call fails.
func getPhysicalMemorySize() int {
	totalMemGB := int(memory.TotalMemory() / 1024 / 1024 / 1024)
	if totalMemGB >= minimumMemorySize {
		return totalMemGB
	}
	if totalMemGB != 0 {
		logger.Warn("The system's physical memory is less than the minimum physical memory size", "physicalSystemMemory(GB)", totalMemGB, "minimumMemorySize(GB)", minimumMemorySize)
	} else {
		logger.Error("Failed to get the physical memory of the system. The minimum physical memory size is used instead", "minimumMemorySize(GB)", minimumMemorySize)
	}
	return minimumMemorySize
}
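
// For example (illustrative numbers, not from the original file): on an 8 GB
// machine memory.TotalMemory() yields totalMemGB = 8, so the function logs a
// warning and returns 16; if the call reports 0, it logs an error and also
// returns 16.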

// CacheKey is a key type for Cache. It maps itself to a shard index when the
// cache is sharded.
type CacheKey interface {
	getShardIndex(shardMask int) int
}

// Cache is a simple key-value cache interface backed by one of the LRU,
// sharded LRU, FIFO, or ARC implementations below.
type Cache interface {
	Add(key CacheKey, value interface{}) (evicted bool)
	Get(key CacheKey) (value interface{}, ok bool)
	Contains(key CacheKey) bool
	Purge()
}

type lruCache struct {
	lru *lru.Cache
}

// CacheKeyUint64 is a CacheKey for uint64 keys.
type CacheKeyUint64 uint64

func (key CacheKeyUint64) getShardIndex(shardMask int) int {
	return int(uint64(key) & uint64(shardMask))
}
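
// As an illustration (example values, not from the original file): a sharded
// cache with 8 shards uses shardMask 7 (0b111), so
// CacheKeyUint64(42).getShardIndex(7) returns 42 & 7 = 2, placing key 42 in
// shard 2.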

func (cache *lruCache) Add(key CacheKey, value interface{}) (evicted bool) {
	return cache.lru.Add(key, value)
}

func (cache *lruCache) Get(key CacheKey) (value interface{}, ok bool) {
	value, ok = cache.lru.Get(key)
	return
}

func (cache *lruCache) Contains(key CacheKey) bool {
	return cache.lru.Contains(key)
}

func (cache *lruCache) Purge() {
	cache.lru.Purge()
}

func (cache *lruCache) Keys() []interface{} {
	return cache.lru.Keys()
}

func (cache *lruCache) Peek(key CacheKey) (value interface{}, ok bool) {
	return cache.lru.Peek(key)
}

func (cache *lruCache) Remove(key CacheKey) {
	cache.lru.Remove(key)
}

func (cache *lruCache) Len() int {
	return cache.lru.Len()
}

type arcCache struct {
	arc *lru.ARCCache
}

func (cache *arcCache) Add(key CacheKey, value interface{}) (evicted bool) {
	cache.arc.Add(key, value)
	// lru.ARCCache.Add does not report evictions, so true is always returned here.
	// TODO-Klaytn-RemoveLater: remove the evicted flag or report a real value, depending on how callers use it.
	return true
}

func (cache *arcCache) Get(key CacheKey) (value interface{}, ok bool) {
	return cache.arc.Get(key)
}

func (cache *arcCache) Contains(key CacheKey) bool {
	return cache.arc.Contains(key)
}

func (cache *arcCache) Purge() {
	cache.arc.Purge()
}

func (cache *arcCache) Keys() []interface{} {
	return cache.arc.Keys()
}

func (cache *arcCache) Peek(key CacheKey) (value interface{}, ok bool) {
	return cache.arc.Peek(key)
}

func (cache *arcCache) Remove(key CacheKey) {
	cache.arc.Remove(key)
}

func (cache *arcCache) Len() int {
	return cache.arc.Len()
}

type lruShardCache struct {
	shards         []*lru.Cache
	shardIndexMask int
}

func (cache *lruShardCache) Add(key CacheKey, val interface{}) (evicted bool) {
	shardIndex := key.getShardIndex(cache.shardIndexMask)
	return cache.shards[shardIndex].Add(key, val)
}

func (cache *lruShardCache) Get(key CacheKey) (value interface{}, ok bool) {
	shardIndex := key.getShardIndex(cache.shardIndexMask)
	return cache.shards[shardIndex].Get(key)
}

func (cache *lruShardCache) Contains(key CacheKey) bool {
	shardIndex := key.getShardIndex(cache.shardIndexMask)
	return cache.shards[shardIndex].Contains(key)
}

func (cache *lruShardCache) Purge() {
	for _, shard := range cache.shards {
		s := shard
		go s.Purge()
	}
}

// NewCache creates a Cache with the given CacheConfiger.
// It terminates the process if config is nil or if the cache cannot be allocated.
func NewCache(config CacheConfiger) Cache {
	if config == nil {
		logger.Crit("config shouldn't be nil!")
	}

	cache, err := config.newCache()
	if err != nil {
		logger.Crit("Failed to allocate cache!", "err", err)
	}
	return cache
}
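
// The function below is an illustrative sketch, not part of the original file.
// It shows how a caller might build caches from the configs defined in this
// file; the sizes and shard count are arbitrary example values.
func exampleNewCacheUsage() {
	// A plain LRU cache holding up to 4096 entries.
	plain := NewCache(LRUConfig{CacheSize: 4096, IsScaled: false})
	plain.Add(CacheKeyUint64(1), "block #1")

	// A sharded LRU cache; each key's getShardIndex picks the shard it lives in.
	sharded := NewCache(LRUShardConfig{CacheSize: 4096, NumShards: 8, IsScaled: false})
	sharded.Add(CacheKeyUint64(2), "block #2")
	if v, ok := sharded.Get(CacheKeyUint64(2)); ok {
		_ = v // use the cached value
	}
}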

type CacheConfiger interface {
	newCache() (Cache, error)
}

type LRUConfig struct {
	CacheSize int
	IsScaled  bool
}

func (c LRUConfig) newCache() (Cache, error) {
	cacheSize := c.CacheSize
	if c.IsScaled {
		cacheSize *= calculateScale()
	}
	lru, err := lru.New(cacheSize)
	return &lruCache{lru}, err
}

type LRUShardConfig struct {
	CacheSize int
	// The Hash and Address types cannot produce more than 2^16 (= 65536) distinct shard indexes,
	// so it is meaningless to set NumShards larger than that.
	NumShards int
	IsScaled  bool
}

const (
	minShardSize = 10
	minNumShards = 2
)

// If the key type is neither common.Hash nor common.Address, set NumShards to 1 or use an LRU cache instead.
// The number of shards is rounded down to a power of two and readjusted to meet the minimum shard size.
func (c LRUShardConfig) newCache() (Cache, error) {
	cacheSize := c.CacheSize
	if c.IsScaled {
		cacheSize *= calculateScale()
	}

	if cacheSize < 1 {
		logger.Error("Invalid cache size", "Cache Size", cacheSize, "Cache Scale", CacheScale)
		return nil, errors.New("must provide a positive cache size")
	}

	numShards := c.makeNumShardsPowOf2()

	if c.NumShards != numShards {
		logger.Warn("The number of shards was adjusted", "Expected", c.NumShards, "Actual", numShards)
	}
	if cacheSize%numShards != 0 {
		logger.Warn("The cache size was adjusted", "Expected", cacheSize, "Actual", cacheSize-(cacheSize%numShards))
	}

	lruShard := &lruShardCache{shards: make([]*lru.Cache, numShards), shardIndexMask: numShards - 1}
	shardsSize := cacheSize / numShards
	var err error
	for i := 0; i < numShards; i++ {
		lruShard.shards[i], err = lru.NewWithEvict(shardsSize, nil)
		if err != nil {
			return nil, err
		}
	}
	return lruShard, nil
}

// makeNumShardsPowOf2 returns the largest power of two that does not exceed NumShards,
// capped so that each shard holds at least minShardSize entries and floored at minNumShards.
func (c LRUShardConfig) makeNumShardsPowOf2() int {
	maxNumShards := float64(c.CacheSize * calculateScale() / minShardSize)
	numShards := int(math.Min(float64(c.NumShards), maxNumShards))

	// Repeatedly clear the lowest set bit; the last value recorded before the
	// loop ends is the largest power of two not exceeding numShards.
	preNumShards := minNumShards
	for numShards >= minNumShards {
		preNumShards = numShards
		numShards = numShards & (numShards - 1)
	}

	return preNumShards
}
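
// As a worked example (illustrative values, not from the original file): with
// CacheSize = 1000, NumShards = 12, and calculateScale() == 1 (the defaults on
// a 16 GB machine), maxNumShards is 1000/10 = 100, so numShards stays 12 and
// is then rounded down to 8, the largest power of two not exceeding it.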

// FIFOCacheConfig is an implementation of the CacheConfiger interface for fifoCache.
type FIFOCacheConfig struct {
	CacheSize int
	IsScaled  bool
}

// newCache creates a Cache interface whose implementation is fifoCache.
func (c FIFOCacheConfig) newCache() (Cache, error) {
	cacheSize := c.CacheSize
	if c.IsScaled {
		cacheSize *= calculateScale()
	}

	lru, err := lru.New(cacheSize)
	return &fifoCache{&lruCache{lru}}, err
}

// fifoCache internally wraps an lruCache.
// All methods are the same as lruCache, but Get is overridden so that it does not update the recency of the data.
type fifoCache struct {
	*lruCache
}

// Get returns the value corresponding to the cache key without updating its recency.
func (cache *fifoCache) Get(key CacheKey) (value interface{}, ok bool) {
	return cache.Peek(key)
}
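
// The function below is an illustrative sketch, not part of the original file.
// It shows the FIFO behavior: because Get is backed by Peek, reading an entry
// does not refresh it, so the oldest insertion is still the first to be evicted.
// The cache size of 2 is an arbitrary example value.
func exampleFIFOGet() {
	cache := NewCache(FIFOCacheConfig{CacheSize: 2, IsScaled: false})
	cache.Add(CacheKeyUint64(1), "a")
	cache.Add(CacheKeyUint64(2), "b")
	cache.Get(CacheKeyUint64(1))          // reading does not refresh key 1
	cache.Add(CacheKeyUint64(3), "c")     // evicts key 1, the oldest insertion
	_ = cache.Contains(CacheKeyUint64(1)) // false: key 1 was evicted despite the Get
}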

type ARCConfig struct {
	CacheSize int
	IsScaled  bool
}

func (c ARCConfig) newCache() (Cache, error) {
	cacheSize := c.CacheSize
	if c.IsScaled {
		cacheSize *= calculateScale()
	}
	arc, err := lru.NewARC(cacheSize)
	return &arcCache{arc}, err
}

// calculateScale returns the scale factor applied to scaled cache sizes.
// It is the product of (TotalPhysicalMemGB / minimumMemorySize), (ScaleByCacheUsageLevel / 100), and (CacheScale / 100).
func calculateScale() int {
	return CacheScale * ScaleByCacheUsageLevel * TotalPhysicalMemGB / minimumMemorySize / 100 / 100
}
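
// For example (illustrative values, not from the original file): with
// TotalPhysicalMemGB = 32, ScaleByCacheUsageLevel = 200 ("normal"), and
// CacheScale = 100, the scale is 100 * 200 * 32 / 16 / 100 / 100 = 4, so a
// scaled cache becomes four times its preset size.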

// GetScaleByCacheUsageLevel returns the scale (%) corresponding to the given cache usage level.
func GetScaleByCacheUsageLevel(cacheUsageLevelFlag string) (int, error) {
	switch cacheUsageLevelFlag {
	case cacheLevelSaving:
		return 100, nil
	case cacheLevelNormal:
		return 200, nil
	case cacheLevelExtreme:
		return 300, nil
	default:
		return 100, errors.New("invalid cache usage level; expected one of 'saving', 'normal', 'extreme'")
	}
}
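
// The function below is an illustrative sketch, not part of the original file,
// showing how a cache-level flag value might be wired into the scaling globals
// above; the fallback behavior here is an assumption, not klaytn's actual flag handling.
func exampleApplyCacheUsageLevel(levelFlag string) {
	scale, err := GetScaleByCacheUsageLevel(levelFlag)
	if err != nil {
		logger.Warn("Using the default cache usage level scale", "flag", levelFlag, "err", err)
	}
	ScaleByCacheUsageLevel = scale // 100 ("saving"), 200 ("normal"), or 300 ("extreme")
}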

// GovernanceCacheKey is a cache key type for governance data.
// It always maps to shard index 0, so governance caches are effectively unsharded.
type GovernanceCacheKey string

func (g GovernanceCacheKey) getShardIndex(shardMask int) int {
	return 0
}