github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/interlock/apply_cache.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package interlock

import (
	"github.com/cznic/mathutil"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/ekvcache"
	"github.com/whtcorpsinc/milevadb/soliton/memory"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
)

// applyCache is used in the apply interlock. When we meet the same value of the outer event
// again, we fetch the cached inner rows instead of fetching them from the inner interlock.
type applyCache struct {
	cache       *ekvcache.SimpleLRUCache
	memCapacity int64
	memTracker  *memory.Tracker // track memory usage.
}

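// applyCacheKey is the byte-form cache key; its Hash method satisfies the key
// interface expected by ekvcache by returning the key bytes directly.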
type applyCacheKey []byte

func (key applyCacheKey) Hash() []byte {
	return key
}

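// applyCacheKVMem estimates the memory cost of one cache entry: the key length
// plus the bytes already tracked by the cached chunk list.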
func applyCacheKVMem(key applyCacheKey, value *chunk.List) int64 {
	return int64(len(key)) + value.GetMemTracker().BytesConsumed()
}

func newApplyCache(ctx stochastikctx.Context) (*applyCache, error) {
	// Since applyCache controls the memory usage by itself, set the capacity of
	// the underlying LRUCache to the maximum to disable its own memory control.
	cache := ekvcache.NewSimpleLRUCache(mathutil.MaxUint, 0.1, 0)
	c := applyCache{
		cache:       cache,
		memCapacity: ctx.GetStochastikVars().NestedLoopJoinCacheCapacity,
		memTracker:  memory.NewTracker(memory.LabelForApplyCache, -1),
	}
	return &c, nil
}

// Get gets the cached inner rows for the given cache key, or nil if the key is not in the cache.
func (c *applyCache) Get(key applyCacheKey) (*chunk.List, error) {
	value, hit := c.cache.Get(&key)
	if !hit {
		return nil, nil
	}
	typedValue := value.(*chunk.List)
	return typedValue, nil
}

// Set inserts an item into the cache. It returns false if the item is not cached,
// either because it is larger than the capacity or because there is no room left after eviction.
func (c *applyCache) Set(key applyCacheKey, value *chunk.List) (bool, error) {
	mem := applyCacheKVMem(key, value)
	if mem > c.memCapacity { // ignore this ekv pair if its size is too large
		return false, nil
	}
	// Evict the oldest entries until the new entry fits within memCapacity.
	for mem+c.memTracker.BytesConsumed() > c.memCapacity {
		evictedKey, evictedValue, evicted := c.cache.RemoveOldest()
		if !evicted {
			return false, nil
		}
		c.memTracker.Consume(-applyCacheKVMem(evictedKey.(applyCacheKey), evictedValue.(*chunk.List)))
	}
	c.memTracker.Consume(mem)
	c.cache.Put(key, value)
	return true, nil
}

// GetMemTracker returns the memory tracker of this apply cache.
func (c *applyCache) GetMemTracker() *memory.Tracker {
	return c.memTracker
}
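
// applyWithCache is an illustrative usage sketch, not part of the original file. It shows
// the intended pattern around applyCache in an apply-style loop: probe the cache with the
// key derived from the current outer event, and only fall back to the inner interlock on a
// miss, caching the fetched rows afterwards. The key and fetchInner parameters are
// hypothetical stand-ins for logic that lives in the real apply interlock.
func applyWithCache(cache *applyCache, key applyCacheKey, fetchInner func() (*chunk.List, error)) (*chunk.List, error) {
	// Fast path: reuse inner rows cached for an identical outer event value.
	if rows, err := cache.Get(key); err != nil || rows != nil {
		return rows, err
	}
	// Cache miss: fetch the inner rows and try to cache them for later outer events.
	rows, err := fetchInner()
	if err != nil {
		return nil, err
	}
	// Set may decline to store the entry when it does not fit into the configured capacity.
	if _, err := cache.Set(key, rows); err != nil {
		return nil, err
	}
	return rows, nil
}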