github.com/matrixorigin/matrixone@v1.2.0/pkg/fileservice/mem_cache.go (about)

     1  // Copyright 2022 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package fileservice
    16  
    17  import (
    18  	"context"
    19  
    20  	"github.com/matrixorigin/matrixone/pkg/fileservice/memorycache"
    21  	"github.com/matrixorigin/matrixone/pkg/fileservice/memorycache/checks/interval"
    22  	"github.com/matrixorigin/matrixone/pkg/perfcounter"
    23  	metric "github.com/matrixorigin/matrixone/pkg/util/metric/v2"
    24  )
    25  
// MemCache is an in-memory IOVectorCache backed by a memorycache.Cache.
// It serves cached entry data on Read and stores freshly fetched entry
// data on Update, reporting hit/miss statistics to its counter sets.
type MemCache struct {
	// cache holds the cached data keyed by (path, offset, size).
	cache *memorycache.Cache
	// counterSets receives cache read/hit/capacity statistics on every Read.
	counterSets []*perfcounter.CounterSet
}
    30  
    31  func NewMemCache(
    32  	dataCache *memorycache.Cache,
    33  	counterSets []*perfcounter.CounterSet,
    34  ) *MemCache {
    35  	ret := &MemCache{
    36  		cache:       dataCache,
    37  		counterSets: counterSets,
    38  	}
    39  	return ret
    40  }
    41  
    42  func NewMemoryCache(
    43  	capacity int64,
    44  	checkOverlaps bool,
    45  	callbacks *CacheCallbacks,
    46  ) *memorycache.Cache {
    47  
    48  	var overlapChecker *interval.OverlapChecker
    49  	if checkOverlaps {
    50  		overlapChecker = interval.NewOverlapChecker("MemCache_LRU")
    51  	}
    52  
    53  	postSetFn := func(key CacheKey, value memorycache.CacheData) {
    54  		if overlapChecker != nil {
    55  			if err := overlapChecker.Insert(key.Path, key.Offset, key.Offset+key.Sz); err != nil {
    56  				panic(err)
    57  			}
    58  		}
    59  
    60  		if callbacks != nil {
    61  			for _, fn := range callbacks.PostSet {
    62  				fn(key, value)
    63  			}
    64  		}
    65  	}
    66  
    67  	postGetFn := func(key CacheKey, value memorycache.CacheData) {
    68  		if callbacks != nil {
    69  			for _, fn := range callbacks.PostGet {
    70  				fn(key, value)
    71  			}
    72  		}
    73  	}
    74  
    75  	postEvictFn := func(key CacheKey, value memorycache.CacheData) {
    76  		if overlapChecker != nil {
    77  			if err := overlapChecker.Remove(key.Path, key.Offset, key.Offset+key.Sz); err != nil {
    78  				panic(err)
    79  			}
    80  		}
    81  
    82  		if callbacks != nil {
    83  			for _, fn := range callbacks.PostEvict {
    84  				fn(key, value)
    85  			}
    86  		}
    87  	}
    88  	return memorycache.NewCache(capacity, postSetFn, postGetFn, postEvictFn)
    89  }
    90  
// Compile-time check that *MemCache satisfies IOVectorCache.
var _ IOVectorCache = new(MemCache)

// Alloc returns an n-byte CacheData obtained from the underlying cache's
// allocator, so the buffer is accounted against the cache's capacity.
func (m *MemCache) Alloc(n int) memorycache.CacheData {
	return m.cache.Alloc(n)
}
    96  
    97  func (m *MemCache) Read(
    98  	ctx context.Context,
    99  	vector *IOVector,
   100  ) (
   101  	err error,
   102  ) {
   103  
   104  	if vector.Policy.Any(SkipMemoryCacheReads) {
   105  		return nil
   106  	}
   107  
   108  	var numHit, numRead int64
   109  	defer func() {
   110  		metric.FSReadHitMemCounter.Add(float64(numHit))
   111  		perfcounter.Update(ctx, func(c *perfcounter.CounterSet) {
   112  			c.FileService.Cache.Read.Add(numRead)
   113  			c.FileService.Cache.Hit.Add(numHit)
   114  			c.FileService.Cache.Memory.Read.Add(numRead)
   115  			c.FileService.Cache.Memory.Hit.Add(numHit)
   116  			c.FileService.Cache.Memory.Capacity.Swap(m.cache.Capacity())
   117  			c.FileService.Cache.Memory.Used.Swap(m.cache.Used())
   118  			c.FileService.Cache.Memory.Available.Swap(m.cache.Available())
   119  		}, m.counterSets...)
   120  	}()
   121  
   122  	path, err := ParsePath(vector.FilePath)
   123  	if err != nil {
   124  		return err
   125  	}
   126  
   127  	for i, entry := range vector.Entries {
   128  		if entry.done {
   129  			continue
   130  		}
   131  		key := CacheKey{
   132  			Path:   path.File,
   133  			Offset: entry.Offset,
   134  			Sz:     entry.Size,
   135  		}
   136  		bs, ok := m.cache.Get(ctx, key)
   137  		numRead++
   138  		if ok {
   139  			vector.Entries[i].CachedData = bs
   140  			vector.Entries[i].done = true
   141  			vector.Entries[i].fromCache = m
   142  			numHit++
   143  		}
   144  	}
   145  
   146  	return
   147  }
   148  
   149  func (m *MemCache) Update(
   150  	ctx context.Context,
   151  	vector *IOVector,
   152  	async bool,
   153  ) error {
   154  
   155  	if vector.Policy.Any(SkipMemoryCacheWrites) {
   156  		return nil
   157  	}
   158  
   159  	path, err := ParsePath(vector.FilePath)
   160  	if err != nil {
   161  		return err
   162  	}
   163  
   164  	for _, entry := range vector.Entries {
   165  		if entry.CachedData == nil {
   166  			continue
   167  		}
   168  		if entry.fromCache == m {
   169  			continue
   170  		}
   171  
   172  		key := CacheKey{
   173  			Path:   path.File,
   174  			Offset: entry.Offset,
   175  			Sz:     entry.Size,
   176  		}
   177  
   178  		m.cache.Set(ctx, key, entry.CachedData)
   179  	}
   180  	return nil
   181  }
   182  
// Flush delegates to the underlying cache's Flush, discarding cached data.
func (m *MemCache) Flush() {
	m.cache.Flush()
}
   186  
// DeletePaths removes all cached entries belonging to the given file paths.
// It always returns nil; the underlying delete reports no error.
func (m *MemCache) DeletePaths(
	ctx context.Context,
	paths []string,
) error {
	m.cache.DeletePaths(ctx, paths)
	return nil
}