github.com/Kintar/etxt@v0.0.0-20221224033739-2fc69f000137/ecache/impl_default_cache.go

package ecache

import (
	crand "crypto/rand"
	"errors"
	"math/rand"
	"strconv"
	"sync"
	"sync/atomic"
)

// TODO: it's a bit questionable whether removeRandEntry should take
// the new entry's hotness into account as a threshold. We may also
// run into loops of evictions otherwise, but it's unclear what the
// best approach is here, or whether any of this is significant at all.
// Then again, this will only happen if the cache is already full of
// recent data, which is a bad scenario anyway, and the alternatives
// are not necessarily nicer, though they are simpler to understand.
// We would need robust benchmarking to decide, I guess.
// TODO: I could make mask entries _not_ be pointers...

// The default etxt cache. It is concurrent-safe (though neither
// optimized for nor expected to be used under heavily concurrent
// scenarios), has memory bounds and uses random sampling to evict
// entries.
type DefaultCache struct {
	cachedMasks     map[[3]uint64]*CachedMaskEntry
	rng             *rand.Rand
	spaceBytesLeft  uint32
	lowestBytesLeft uint32
	byteSizeLimit   uint32
	mutex           sync.RWMutex
}

// Creates a new cache bounded by the given size.
//
// The given size must be at least 1024 bytes. If you don't want a
// constrained cache, pass (1 << 30) (1GB) or similar.
func NewDefaultCache(maxByteSize int) (*DefaultCache, error) {
	if maxByteSize < 1024 {
		msg := "cache capacity must be at least 1024 bytes (got "
		msg += strconv.Itoa(maxByteSize) + ")"
		return nil, errors.New(msg)
	}

	// derive a seed for the cache's rng from crypto/rand
	randBytes := make([]byte, 8)
	_, err := crand.Read(randBytes) // spec: n == len(b) iff err == nil
	if err != nil {
		return nil, err
	}
	seed := int64(0)
	for _, randByte := range randBytes {
		seed = (seed << 8) | int64(randByte)
	}

	return &DefaultCache{
		cachedMasks:     make(map[[3]uint64]*CachedMaskEntry, 128),
		spaceBytesLeft:  uint32(maxByteSize),
		lowestBytesLeft: uint32(maxByteSize),
		byteSizeLimit:   uint32(maxByteSize),
		rng:             rand.New(rand.NewSource(seed)),
	}, nil
}

// Attempts to remove the entry with the lowest eviction cost from a
// small pool of samples. May not remove anything in some cases.
//
// The returned value is the freed space, which must be manually
// added back to spaceBytesLeft by the caller.
func (self *DefaultCache) removeRandEntry(hotness uint32, instant uint32) uint32 {
	const SampleSize = 10 // TODO: probably allow setting this dynamically?

	// sample a few entries and remember the coldest one
	self.mutex.RLock()
	var selectedKey [3]uint64
	lowestHotness := ^uint32(0)
	samplesTaken := 0
	for key, cachedMaskEntry := range self.cachedMasks {
		currHotness := cachedMaskEntry.Hotness(instant)

		// on lower hotness, update selected eviction target
		if currHotness < lowestHotness {
			lowestHotness = currHotness
			selectedKey = key
		}

		// break if we already took enough samples
		samplesTaken += 1
		if samplesTaken >= SampleSize {
			break
		}
	}
	self.mutex.RUnlock()

	// delete the selected entry, if any is colder than the given hotness
	freedSpace := uint32(0)
	if lowestHotness < hotness {
		self.mutex.Lock()
		entry, stillExists := self.cachedMasks[selectedKey]
		if stillExists {
			delete(self.cachedMasks, selectedKey)
			freedSpace = entry.ByteSize
		}
		self.mutex.Unlock()
	}
	return freedSpace
}
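// A minimal construction sketch (the 16MiB figure is only an example
// budget; the error can only come from a capacity below 1024 bytes or
// a failed crypto/rand read):
//
//	cache, err := ecache.NewDefaultCache(16 * 1024 * 1024)
//	if err != nil {
//		log.Fatal(err)
//	}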
// Stores the given mask under the given key.
func (self *DefaultCache) PassMask(key [3]uint64, mask GlyphMask) {
	const MaxMakeRoomAttempts = 2

	// see if we have enough space to add the mask, or try to
	// make some room for it otherwise
	maskEntry, instant := NewCachedMaskEntry(mask)
	spaceBytesLeft := atomic.LoadUint32(&self.spaceBytesLeft)
	freedSpace := uint32(0)
	if maskEntry.ByteSize > spaceBytesLeft {
		hotness := maskEntry.Hotness(instant)
		missingSpace := maskEntry.ByteSize - spaceBytesLeft
		for i := 0; i < MaxMakeRoomAttempts; i++ {
			freedSpace += self.removeRandEntry(hotness, instant)
			if freedSpace >= missingSpace {
				goto roomMade
			}
		}

		// we couldn't make enough room for the new entry. desist,
		// but give back any space we did manage to free
		if freedSpace != 0 {
			atomic.AddUint32(&self.spaceBytesLeft, freedSpace)
		}
		return
	}

roomMade:
	// add the mask to the cache
	self.mutex.Lock()
	defer self.mutex.Unlock()
	if freedSpace != 0 {
		atomic.AddUint32(&self.spaceBytesLeft, freedSpace)
	}
	_, maskAlreadyExists := self.cachedMasks[key]
	if maskAlreadyExists {
		return
	}
	if atomic.LoadUint32(&self.spaceBytesLeft) < maskEntry.ByteSize {
		return
	}

	// subtract the entry size from spaceBytesLeft (adding the two's
	// complement ^uint32(ByteSize - 1) is an atomic subtraction)
	newLeft := atomic.AddUint32(&self.spaceBytesLeft, ^uint32(maskEntry.ByteSize-1))
	if newLeft < atomic.LoadUint32(&self.lowestBytesLeft) {
		atomic.StoreUint32(&self.lowestBytesLeft, newLeft)
	}
	self.cachedMasks[key] = maskEntry
}

// Returns an approximation of the number of bytes taken by the
// glyph masks currently stored in the cache.
func (self *DefaultCache) ApproxByteSize() int {
	return int(atomic.LoadUint32(&self.byteSizeLimit) - atomic.LoadUint32(&self.spaceBytesLeft))
}

// Returns an approximation of the maximum amount of bytes that the
// cache has been filled with at any point of its life.
//
// This method can be useful to determine the actual usage of the
// cache within your application and to set its capacity to a
// reasonable value.
func (self *DefaultCache) PeakSize() int {
	return int(atomic.LoadUint32(&self.byteSizeLimit) - atomic.LoadUint32(&self.lowestBytesLeft))
}

// Gets the mask associated with the given key, if available.
func (self *DefaultCache) GetMask(key [3]uint64) (GlyphMask, bool) {
	self.mutex.RLock()
	entry, found := self.cachedMasks[key]
	self.mutex.RUnlock()
	if !found {
		return nil, false
	}
	entry.IncreaseAccessCount()
	return entry.Mask, true
}

// Returns a new cache handler for the current cache. While DefaultCache
// is concurrent-safe, handlers can only be used non-concurrently. One
// can create multiple handlers for the same cache to be used with
// different renderers.
func (self *DefaultCache) NewHandler() *DefaultCacheHandler {
	var zeroKey [3]uint64
	return &DefaultCacheHandler{cache: self, activeKey: zeroKey}
}
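A short usage sketch may help put the API above in context. The snippet below is only illustrative: etxt.NewStdRenderer and Renderer.SetCacheHandler are assumed from the parent etxt package, and the two renderers are hypothetical; only NewDefaultCache, NewHandler, ApproxByteSize and PeakSize come from this file.

package main

import (
	"fmt"
	"log"

	"github.com/Kintar/etxt"
	"github.com/Kintar/etxt/ecache"
)

func main() {
	// one shared cache, bounded to 16MiB (must be at least 1024 bytes)
	cache, err := ecache.NewDefaultCache(16 * 1024 * 1024)
	if err != nil {
		log.Fatal(err)
	}

	// DefaultCache is concurrent-safe, but each handler must be used
	// non-concurrently, so create one handler per renderer
	uiRenderer := etxt.NewStdRenderer()
	worldRenderer := etxt.NewStdRenderer()
	uiRenderer.SetCacheHandler(cache.NewHandler())
	worldRenderer.SetCacheHandler(cache.NewHandler())

	// ... render text during the program's lifetime ...

	// inspect usage to tune the cache capacity for your application
	fmt.Printf("cache now holds ~%d bytes (peak ~%d)\n",
		cache.ApproxByteSize(), cache.PeakSize())
}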