github.com/matrixorigin/matrixone@v1.2.0/pkg/fileservice/memorycache/lrucache/shard.go

// Copyright 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lrucache

import (
	"context"
	"sync/atomic"

	"github.com/matrixorigin/matrixone/pkg/fileservice/memorycache/lrucache/internal/hashmap"
	"github.com/matrixorigin/matrixone/pkg/perfcounter"
)

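// Set inserts value under key into the shard. If the key is already cached,
// the existing entry is kept and the new value is dropped. On insert the item
// is pushed to the front of the eviction list, the per-shard and shared size
// counters grow by the value's byte length, and, if the shard is now at or
// over capacity, least recently used entries are evicted while the write lock
// is still held.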
func (s *shard[K, V]) Set(ctx context.Context, h uint64, key K, value V) {
	size := int64(len(value.Bytes()))
	item := s.allocItem()
	item.h = h
	item.Key = key
	item.Value = value
	item.Size = size
	s.Lock()
	defer s.Unlock()
	if _, ok := s.kv.Get(h, key); ok {
		// the key is already cached; keep the existing entry and
		// return the unused item to the pool
		s.freeItem(item)
		return
	}
	s.kv.Set(h, key, item)
	s.size += size
	atomic.AddInt64(s.totalSize, size)
	s.evicts.PushFront(item)
	if s.postSet != nil {
		s.postSet(key, value)
	}
	// s.size is only accessed while the write lock is held, so a plain read suffices
	if s.size >= s.capacity {
		s.evict(ctx)
	}
}

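// evict removes entries from the back of the eviction list until the shard is
// within capacity or empty. It must be called with the write lock held. The
// deferred perfcounter update reports how many entries were evicted and how
// many of them were never read.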
func (s *shard[K, V]) evict(ctx context.Context) {
	var numEvict, numEvictWithZeroRead int64
	defer func() {
		// report eviction statistics once, after the loop finishes
		if numEvict > 0 || numEvictWithZeroRead > 0 {
			perfcounter.Update(ctx, func(set *perfcounter.CounterSet) {
				set.FileService.Cache.LRU.Evict.Add(numEvict)
				set.FileService.Cache.LRU.EvictWithZeroRead.Add(numEvictWithZeroRead)
			})
		}
	}()

	for {
		if s.size <= s.capacity {
			return
		}
		if s.kv.Len() == 0 {
			return
		}

		// the least recently used entry sits at the back of the eviction list
		elem := s.evicts.Back()
		if elem == nil {
			return
		}
		s.kv.Delete(elem.h, elem.Key)
		s.size -= elem.Size
		atomic.AddInt64(s.totalSize, -elem.Size)
		s.evicts.Remove(elem)
		if s.postEvict != nil {
			s.postEvict(elem.Key, elem.Value)
		}
		numEvict++
		if atomic.LoadInt64(&elem.NumRead) == 0 {
			numEvictWithZeroRead++
		}
		s.freeItem(elem)
	}
}

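// Get returns the cached value for key, if present, and records the read so
// that evict can count entries that were never read. It takes only the read
// lock and does not move the entry within the eviction list.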
func (s *shard[K, V]) Get(ctx context.Context, h uint64, key K) (value V, ok bool) {
	s.RLock()
	defer s.RUnlock()
	if elem, ok := s.kv.Get(h, key); ok {
		// only the read lock is held here, so the read counter is bumped atomically
		atomic.AddInt64(&elem.NumRead, 1)
		if s.postGet != nil {
			s.postGet(key, elem.Value)
		}
		return elem.Value, true
	}
	return
}

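// Flush drops every entry in the shard, invoking postEvict for each one,
// returns the items to the pool, and resets the shard's size, eviction list,
// and hash map.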
func (s *shard[K, V]) Flush() {
	s.Lock()
	defer s.Unlock()
	for elem := s.evicts.Back(); elem != nil; elem = s.evicts.Back() {
		s.kv.Delete(elem.h, elem.Key)
		// keep the shared size counter in step with the per-entry
		// accounting done in evict
		atomic.AddInt64(s.totalSize, -elem.Size)
		s.evicts.Remove(elem)
		if s.postEvict != nil {
			s.postEvict(elem.Key, elem.Value)
		}
		s.freeItem(elem)
	}
	s.size = 0
	s.evicts = newList[K, V]()
	s.kv = hashmap.New[K, lruItem[K, V]](int(s.capacity))
}

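// allocItem takes a reusable item from the shard's pool.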
func (s *shard[K, V]) allocItem() *lruItem[K, V] {
	return s.pool.get()
}

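// freeItem resets the item's read counter and returns it to the pool.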
func (s *shard[K, V]) freeItem(item *lruItem[K, V]) {
	item.NumRead = 0
	s.pool.put(item)
}
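
// A minimal usage sketch for this shard (hypothetical caller; in the full
// cache the parent LRU computes the hash h and routes the key to a shard,
// which is outside this file). hashOf is a hypothetical helper, not part of
// this package:
//
//	h := hashOf(key)
//	sh.Set(ctx, h, key, value)
//	if v, ok := sh.Get(ctx, h, key); ok {
//		_ = v // cache hit
//	}
//	sh.Flush()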