github.com/zuoyebang/bitalosdb@v1.1.1-0.20240516111551-79a8c4d8ce20/internal/cache/lrucache/clockpro.go

// Copyright 2021 The Bitalosdb author(hustxrb@163.com) and other contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lrucache

import (
	"bytes"
	"fmt"
	"os"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"

	"github.com/zuoyebang/bitalosdb/internal/base"
	"github.com/zuoyebang/bitalosdb/internal/invariants"
	"github.com/zuoyebang/bitalosdb/internal/options"
)

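// key identifies a cached block by the id of its owner and the block's offset.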
type key struct {
	id     uint64
	offset uint64
}

func (k key) String() string {
	return fmt.Sprintf("%d/%d", k.id, k.offset)
}

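// Handle wraps a reference-counted cache value. Get returns the cached bytes
// (nil for the empty Handle returned on a miss) and Release drops the
// reference acquired by the lookup; callers must Release every Handle they
// obtain once they are done with the buffer.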
type Handle struct {
	value *Value
}

func (h Handle) Get() []byte {
	if h.value != nil {
		return h.value.buf
	}
	return nil
}

func (h Handle) Release() {
	if h.value != nil {
		h.value.release()
	}
}

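// shard is a single partition of the cache running an independent CLOCK-Pro
// style replacement policy. Resident entries are either "hot" or "cold";
// "test" entries are non-resident ghosts that retain only the key so that a
// quick re-access can adaptively grow coldTarget. Three clock hands
// (handHot, handCold, handTest) sweep a circular list of entries, and evict
// keeps sizeHot+sizeCold at or below targetSize.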
type shard struct {
	hits         int64
	misses       int64
	mu           sync.RWMutex
	reservedSize int64
	maxSize      int64
	coldTarget   int64
	blocks       robinHoodMap
	entries      map[*entry]struct{}
	handHot      *entry
	handCold     *entry
	handTest     *entry
	sizeHot      int64
	sizeCold     int64
	sizeTest     int64
	countHot     int64
	countCold    int64
	countTest    int64
}

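// Get returns a Handle for the block identified by (id, offset). On a hit it
// acquires a reference to the cached value and sets the entry's reference
// bit so the clock hands keep it resident; on a miss it returns the empty
// Handle. Hit and miss counters are updated atomically.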
func (c *shard) Get(id uint64, offset uint64) Handle {
	c.mu.RLock()
	var value *Value
	if e := c.blocks.Get(key{id, offset}); e != nil {
		value = e.acquireValue()
		if value != nil {
			atomic.StoreInt32(&e.referenced, 1)
		}
	}
	c.mu.RUnlock()
	if value == nil {
		atomic.AddInt64(&c.misses, 1)
		return Handle{}
	}
	atomic.AddInt64(&c.hits, 1)
	return Handle{value: value}
}

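// Set inserts or updates the block identified by (id, offset) and returns a
// Handle holding the caller's reference to value. Three cases are handled:
// an unknown key is added as a cold entry, an update of a resident entry
// keeps its current hot/cold status, and a hit on a non-resident test entry
// is re-added as hot while coldTarget is grown.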
func (c *shard) Set(id uint64, offset uint64, value *Value) Handle {
	if n := value.refs(); n != 1 {
		panic(fmt.Sprintf("cache: Value has already been added to the cache: refs=%d", n))
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	k := key{id, offset}
	e := c.blocks.Get(k)

	switch {
	case e == nil:
		// No entry for this key: add it as a cold entry.
		e = newEntry(c, k, int64(len(value.buf)))
		e.setValue(value)
		if c.metaAdd(k, e) {
			value.ref.trace("add-cold")
			c.sizeCold += e.size
			c.countCold++
		} else {
			value.ref.trace("skip-cold")
			e.free()
			e = nil
		}

	case e.peekValue() != nil:
		// The entry is resident: replace its value in place and adjust the
		// hot/cold size accounting by the size delta.
		e.setValue(value)
		atomic.StoreInt32(&e.referenced, 1)
		delta := int64(len(value.buf)) - e.size
		e.size = int64(len(value.buf))
		if e.ptype == etHot {
			value.ref.trace("add-hot")
			c.sizeHot += delta
		} else {
			value.ref.trace("add-cold")
			c.sizeCold += delta
		}
		c.evict()

	default:
		// The entry is a non-resident test entry: grow coldTarget and re-add
		// the key as a hot entry.
		c.sizeTest -= e.size
		c.countTest--
		c.metaDel(e)
		c.metaCheck(e)

		e.size = int64(len(value.buf))
		c.coldTarget += e.size
		if c.coldTarget > c.targetSize() {
			c.coldTarget = c.targetSize()
		}

		atomic.StoreInt32(&e.referenced, 0)
		e.setValue(value)
		e.ptype = etHot
		if c.metaAdd(k, e) {
			value.ref.trace("add-hot")
			c.sizeHot += e.size
			c.countHot++
		} else {
			value.ref.trace("skip-hot")
			e.free()
			e = nil
		}
	}

	c.checkConsistency()

	return Handle{value: value}
}

// checkConsistency panics if any of the per-type counts or sizes has gone
// negative, or if a non-zero size is paired with a zero count.
func (c *shard) checkConsistency() {
	switch {
	case c.sizeHot < 0 || c.sizeCold < 0 || c.sizeTest < 0 || c.countHot < 0 || c.countCold < 0 || c.countTest < 0:
		panic(fmt.Sprintf("cache: unexpected negative: %d (%d bytes) hot, %d (%d bytes) cold, %d (%d bytes) test",
			c.countHot, c.sizeHot, c.countCold, c.sizeCold, c.countTest, c.sizeTest))
	case c.sizeHot > 0 && c.countHot == 0:
		panic(fmt.Sprintf("cache: mismatch %d hot size, %d hot count", c.sizeHot, c.countHot))
	case c.sizeCold > 0 && c.countCold == 0:
		panic(fmt.Sprintf("cache: mismatch %d cold size, %d cold count", c.sizeCold, c.countCold))
	case c.sizeTest > 0 && c.countTest == 0:
		panic(fmt.Sprintf("cache: mismatch %d test size, %d test count", c.sizeTest, c.countTest))
	}
}

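// Delete evicts the block identified by (id, offset), if present. Existence
// is first checked under the read lock so that deleting an absent key does
// not serialize concurrent readers behind the write lock; the lookup is
// repeated after taking the write lock.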
func (c *shard) Delete(id uint64, offset uint64) {
	k := key{id, offset}
	c.mu.RLock()
	exists := c.blocks.Get(k) != nil
	c.mu.RUnlock()
	if !exists {
		return
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	e := c.blocks.Get(k)
	if e == nil {
		return
	}
	c.metaEvict(e)

	c.checkConsistency()
}

// Free releases every entry in the shard. It is called from LruCache.Unref
// once the cache's reference count reaches zero.
func (c *shard) Free() {
	c.mu.Lock()
	defer c.mu.Unlock()

	for c.handHot != nil {
		e := c.handHot
		c.metaDel(c.handHot)
		e.free()
	}

	c.blocks.free()
}

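// Reserve subtracts n bytes from the shard's usable capacity (n may be
// negative to return a previous reservation) and evicts entries as needed
// to respect the reduced target size.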
func (c *shard) Reserve(n int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.reservedSize += int64(n)

	targetSize := c.targetSize()
	if c.coldTarget > targetSize {
		c.coldTarget = targetSize
	}

	c.evict()
	c.checkConsistency()
}

// Size returns the number of resident bytes (hot plus cold) in the shard.
func (c *shard) Size() int64 {
	c.mu.RLock()
	size := c.sizeHot + c.sizeCold
	c.mu.RUnlock()
	return size
}

// targetSize returns the capacity available to resident entries: maxSize
// minus outstanding reservations, but never less than 1, which avoids an
// infinite eviction loop when reservations meet or exceed maxSize.
func (c *shard) targetSize() int64 {
	target := c.maxSize - c.reservedSize
	if target < 1 {
		return 1
	}
	return target
}

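// metaAdd links e into the block map and the circular entry list under key,
// evicting first so that the new entry fits. It reports false, leaving e
// unlinked, when e by itself exceeds the shard's target size.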
func (c *shard) metaAdd(key key, e *entry) bool {
	c.evict()
	if e.size > c.targetSize() {
		return false
	}

	c.blocks.Put(key, e)
	if entriesGoAllocated {
		c.entries[e] = struct{}{}
	}

	if c.handHot == nil {
		c.handHot = e
		c.handCold = e
		c.handTest = e
	} else {
		c.handHot.link(e)
	}

	if c.handCold == c.handHot {
		c.handCold = c.handCold.prev()
	}

	return true
}

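// metaDel unlinks e from the block map and the circular entry list, first
// moving any clock hand that points at e to e's predecessor. The entry's
// value reference is dropped, but e itself is not freed.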
func (c *shard) metaDel(e *entry) {
	if value := e.peekValue(); value != nil {
		value.ref.trace("metaDel")
	}
	e.setValue(nil)

	c.blocks.Delete(e.key)
	if entriesGoAllocated {
		delete(c.entries, e)
	}

	if e == c.handHot {
		c.handHot = c.handHot.prev()
	}
	if e == c.handCold {
		c.handCold = c.handCold.prev()
	}
	if e == c.handTest {
		c.handTest = c.handTest.prev()
	}

	if e.unlink() == e {
		c.handHot = nil
		c.handCold = nil
		c.handTest = nil
	}
}

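// metaCheck verifies, when invariants are enabled, that e has been fully
// removed: it must not appear in the entries map, the block map, or the
// circular entry list, and the cached per-type counts and sizes must match
// freshly recomputed totals.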
func (c *shard) metaCheck(e *entry) {
	if invariants.Enabled {
		if _, ok := c.entries[e]; ok {
			fmt.Fprintf(os.Stderr, "%p: %s unexpectedly found in entries map\n%s",
				e, e.key, debug.Stack())
			os.Exit(1)
		}
		if c.blocks.findByValue(e) != nil {
			fmt.Fprintf(os.Stderr, "%p: %s unexpectedly found in blocks map\n%s\n%s",
				e, e.key, &c.blocks, debug.Stack())
			os.Exit(1)
		}
		var countHot, countCold, countTest int64
		var sizeHot, sizeCold, sizeTest int64
		for t := c.handHot.next(); t != nil; t = t.next() {
			switch t.ptype {
			case etHot:
				countHot++
				sizeHot += t.size
			case etCold:
				countCold++
				sizeCold += t.size
			case etTest:
				countTest++
				sizeTest += t.size
			}
			if e == t {
				fmt.Fprintf(os.Stderr, "%p: %s unexpectedly found in blocks list\n%s",
					e, e.key, debug.Stack())
				os.Exit(1)
			}
			if t == c.handHot {
				break
			}
		}
		if countHot != c.countHot || countCold != c.countCold || countTest != c.countTest ||
			sizeHot != c.sizeHot || sizeCold != c.sizeCold || sizeTest != c.sizeTest {
			fmt.Fprintf(os.Stderr, `divergence of Hot,Cold,Test statistics
				cache's statistics: hot %d, %d, cold %d, %d, test %d, %d
				recalculated statistics: hot %d, %d, cold %d, %d, test %d, %d
%s`,
				c.countHot, c.sizeHot, c.countCold, c.sizeCold, c.countTest, c.sizeTest,
				countHot, sizeHot, countCold, sizeCold, countTest, sizeTest,
				debug.Stack())
			os.Exit(1)
		}
	}
}

func (c *shard) metaEvict(e *entry) {
	switch e.ptype {
	case etHot:
		c.sizeHot -= e.size
		c.countHot--
	case etCold:
		c.sizeCold -= e.size
		c.countCold--
	case etTest:
		c.sizeTest -= e.size
		c.countTest--
	}
	c.metaDel(e)
	c.metaCheck(e)
	e.free()
}

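// evict advances the cold hand until the resident size (hot plus cold) is
// below the shard's target size.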
func (c *shard) evict() {
	for c.targetSize() <= c.sizeHot+c.sizeCold && c.handCold != nil {
		c.runHandCold(c.countCold, c.sizeCold)
	}
}

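// runHandCold advances the cold hand one step: a referenced cold entry is
// promoted to hot, while an unreferenced one is demoted to a non-resident
// test entry (its value is dropped) and the test hand is run if the test
// section has grown past the target size. Afterwards the hot hand is run
// while the hot section exceeds its share of the target size.
// countColdDebug and sizeColdDebug are the caller's snapshot of the cold
// statistics, checked on entry to catch unintended mutation through
// re-entrant hand movement.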
func (c *shard) runHandCold(countColdDebug, sizeColdDebug int64) {
	if c.countCold != countColdDebug || c.sizeCold != sizeColdDebug {
		panic(fmt.Sprintf("runHandCold: cold count and size are %d, %d, arguments are %d and %d",
			c.countCold, c.sizeCold, countColdDebug, sizeColdDebug))
	}

	e := c.handCold
	if e.ptype == etCold {
		if atomic.LoadInt32(&e.referenced) == 1 {
			atomic.StoreInt32(&e.referenced, 0)
			e.ptype = etHot
			c.sizeCold -= e.size
			c.countCold--
			c.sizeHot += e.size
			c.countHot++
		} else {
			e.setValue(nil)
			e.ptype = etTest
			c.sizeCold -= e.size
			c.countCold--
			c.sizeTest += e.size
			c.countTest++
			for c.targetSize() < c.sizeTest && c.handTest != nil {
				c.runHandTest()
			}
		}
	}

	c.handCold = c.handCold.next()

	for c.targetSize()-c.coldTarget <= c.sizeHot && c.handHot != nil {
		c.runHandHot()
	}
}

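// runHandHot advances the hot hand one step, clearing the reference bit of
// a referenced hot entry and demoting an unreferenced one to cold. If the
// hot hand has caught up with the test hand, the test hand is run first.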
func (c *shard) runHandHot() {
	if c.handHot == c.handTest && c.handTest != nil {
		c.runHandTest()
		if c.handHot == nil {
			return
		}
	}

	e := c.handHot
	if e.ptype == etHot {
		if atomic.LoadInt32(&e.referenced) == 1 {
			atomic.StoreInt32(&e.referenced, 0)
		} else {
			e.ptype = etCold
			c.sizeHot -= e.size
			c.countHot--
			c.sizeCold += e.size
			c.countCold++
		}
	}

	c.handHot = c.handHot.next()
}

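// runHandTest advances the test hand one step, discarding the non-resident
// test entry it points at and shrinking coldTarget. If the test hand has
// caught up with the cold hand while cold entries remain, the cold hand is
// run first.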
func (c *shard) runHandTest() {
	if c.sizeCold > 0 && c.handTest == c.handCold && c.handCold != nil {
		if c.countCold == 0 {
			panic(fmt.Sprintf("cache: mismatch %d cold size, %d cold count", c.sizeCold, c.countCold))
		}

		c.runHandCold(c.countCold, c.sizeCold)
		if c.handTest == nil {
			return
		}
	}

	e := c.handTest
	if e.ptype == etTest {
		c.sizeTest -= e.size
		c.countTest--
		c.coldTarget -= e.size
		if c.coldTarget < 0 {
			c.coldTarget = 0
		}
		c.metaDel(e)
		c.metaCheck(e)
		e.free()
	}

	c.handTest = c.handTest.next()
}

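// LruCache is a sharded, CLOCK-Pro style block cache. Keys are (id, offset)
// pairs, values are reference-counted byte buffers, and each key is routed
// to a shard by hashing the offset (see getShardByHashId). The cache itself
// is reference counted via Ref/Unref.
//
// A minimal usage sketch from inside this package (illustrative only; lrc,
// data, id and offset are placeholders, and construction goes through
// newShards with options.CacheOptions):
//
//	v := lrc.Alloc(len(data))
//	copy(v.buf, data)
//	lrc.setValue(id, offset, v).Release()
//
//	if h := lrc.getValue(id, offset); h.Get() != nil {
//		_ = h.Get() // cached bytes, valid until h.Release()
//		h.Release()
//	}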
type LruCache struct {
	refs     int64
	maxSize  int64
	idAlloc  uint64
	shards   []shard
	shardCnt uint64
	logger   base.Logger
	tr       struct {
		sync.Mutex
		msgs []string
	}
}

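// newShards builds a LruCache with opts.Shards shards, splitting the total
// cache size and hash-table capacity evenly across them. Under invariants a
// finalizer is registered that reports a cache whose reference count never
// reached zero.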
func newShards(opts *options.CacheOptions) *LruCache {
	shardNum := opts.Shards
	c := &LruCache{
		refs:     1,
		maxSize:  opts.Size,
		idAlloc:  1,
		shards:   make([]shard, shardNum),
		shardCnt: uint64(shardNum),
		logger:   opts.Logger,
	}
	c.trace("alloc", c.refs)
	shardSize := c.maxSize / int64(shardNum)
	shardHashSize := opts.HashSize / shardNum
	for i := range c.shards {
		c.shards[i] = shard{
			maxSize:    shardSize,
			coldTarget: shardSize,
		}
		if entriesGoAllocated {
			c.shards[i].entries = make(map[*entry]struct{})
		}
		c.shards[i].blocks.init(shardHashSize)
	}

	invariants.SetFinalizer(c, func(obj interface{}) {
		c := obj.(*LruCache)
		if v := atomic.LoadInt64(&c.refs); v != 0 {
			c.tr.Lock()
			fmt.Fprintf(os.Stderr,
				"cache: cache (%p) has non-zero reference count: %d\n", c, v)
			if len(c.tr.msgs) > 0 {
				fmt.Fprintf(os.Stderr, "%s\n", strings.Join(c.tr.msgs, "\n"))
			}
			c.tr.Unlock()
			os.Exit(1)
		}
	})
	return c
}

// getShardByHashId routes a hash value to its shard.
func (lrc *LruCache) getShardByHashId(h uint64) *shard {
	return &lrc.shards[h%lrc.shardCnt]
}

// Ref adds a reference to the cache. It panics if the reference count was
// not already positive.
func (lrc *LruCache) Ref() {
	v := atomic.AddInt64(&lrc.refs, 1)
	if v <= 1 {
		panic(fmt.Sprintf("cache: inconsistent reference count: %d", v))
	}
	lrc.trace("ref", v)
}

// Unref drops a reference; when the count reaches zero every shard is freed.
func (lrc *LruCache) Unref() {
	v := atomic.AddInt64(&lrc.refs, -1)
	lrc.trace("unref", v)
	switch {
	case v < 0:
		panic(fmt.Sprintf("cache: inconsistent reference count: %d", v))
	case v == 0:
		for i := range lrc.shards {
			lrc.shards[i].Free()
		}
	}
}

func (lrc *LruCache) getValue(id uint64, offset uint64) Handle {
	return lrc.getShardByHashId(offset).Get(id, offset)
}

func (lrc *LruCache) setValue(id uint64, offset uint64, value *Value) Handle {
	return lrc.getShardByHashId(offset).Set(id, offset, value)
}

func (lrc *LruCache) del(id uint64, offset uint64) {
	lrc.getShardByHashId(offset).Delete(id, offset)
}

// MaxSize returns the configured capacity of the cache in bytes.
func (lrc *LruCache) MaxSize() int64 {
	return lrc.maxSize
}

// Size returns the number of resident bytes summed over all shards.
func (lrc *LruCache) Size() int64 {
	var size int64
	for i := range lrc.shards {
		size += lrc.shards[i].Size()
	}
	return size
}

// Alloc allocates a Value of n bytes intended to be stored in the cache.
func (lrc *LruCache) Alloc(n int) *Value {
	return newValue(n)
}

// Free releases a Value that was allocated with Alloc but never added to
// the cache. It panics if the Value has been added.
func (lrc *LruCache) Free(v *Value) {
	if n := v.refs(); n > 1 {
		panic(fmt.Sprintf("cache: Value has been added to the cache: refs=%d", n))
	}
	v.release()
}

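// Reserve sets aside n bytes of capacity, spreading the reservation evenly
// across the shards, and returns a closure that gives the reservation back.
// Calling the closure a second time panics.
//
// Illustrative sketch (the 64 MB figure is arbitrary):
//
//	release := lrc.Reserve(64 << 20)
//	defer release()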
func (lrc *LruCache) Reserve(n int) func() {
	shardN := (n + len(lrc.shards) - 1) / len(lrc.shards)
	for i := range lrc.shards {
		lrc.shards[i].Reserve(shardN)
	}
	return func() {
		if shardN == -1 {
			panic("cache: cache reservation already released")
		}
		for i := range lrc.shards {
			lrc.shards[i].Reserve(-shardN)
		}
		shardN = -1
	}
}

// Metrics summarizes the cache: resident size, number of entries in the
// block maps, hit and miss counters, and a per-shard breakdown.
type Metrics struct {
	Size   int64
	Count  int64
	Hits   int64
	Misses int64

	ShardsMetrics []ShardMetrics
}

// ShardMetrics reports the resident size and block-map entry count of a
// single shard.
type ShardMetrics struct {
	Size  int64
	Count int64
}

func (m Metrics) String() string {
	var shards bytes.Buffer
	for i := range m.ShardsMetrics {
		shards.WriteString(fmt.Sprintf("[%d:%d:%d]", i, m.ShardsMetrics[i].Size, m.ShardsMetrics[i].Count))
	}

	return fmt.Sprintf("size:%d count:%d hit:%d mis:%d shards:%s", m.Size, m.Count, m.Hits, m.Misses, shards.String())
}

// Metrics gathers a point-in-time snapshot across all shards. Each shard is
// read under its own lock, so the totals are not a single atomic snapshot
// of the whole cache.
func (lrc *LruCache) Metrics() Metrics {
	var m Metrics
	m.ShardsMetrics = make([]ShardMetrics, lrc.shardCnt)
	for i := range lrc.shards {
		s := &lrc.shards[i]
		s.mu.RLock()
		size := s.sizeHot + s.sizeCold
		count := int64(s.blocks.Count())
		m.ShardsMetrics[i] = ShardMetrics{
			Size:  size,
			Count: count,
		}
		m.Size += size
		m.Count += count
		s.mu.RUnlock()
		m.Hits += atomic.LoadInt64(&s.hits)
		m.Misses += atomic.LoadInt64(&s.misses)
	}
	return m
}

// MetricsInfo returns the metrics formatted via Metrics.String.
func (lrc *LruCache) MetricsInfo() string {
	return lrc.Metrics().String()
}

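// NewID returns a new unique id for use as the id component of cache keys.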
func (lrc *LruCache) NewID() uint64 {
	return atomic.AddUint64(&lrc.idAlloc, 1)
}