// github.com/Finschia/finschia-sdk@v0.48.1/store/cachekv/store.go

package cachekv

import (
	"bytes"
	"io"
	"sort"
	"sync"
	"time"

	dbm "github.com/tendermint/tm-db"

	"github.com/Finschia/ostracon/libs/math"

	"github.com/Finschia/finschia-sdk/internal/conv"
	"github.com/Finschia/finschia-sdk/store/listenkv"
	"github.com/Finschia/finschia-sdk/store/tracekv"
	"github.com/Finschia/finschia-sdk/store/types"
	"github.com/Finschia/finschia-sdk/telemetry"
	"github.com/Finschia/finschia-sdk/types/kv"
)

// cValue represents a cached value. If value is nil and the key is not
// tracked in store.deleted, the parent doesn't have the key, so there is
// no need to delete it upon Write().
type cValue struct {
	value []byte
	dirty bool
}

// Store wraps an in-memory cache around an underlying types.KVStore.
// Set, Delete and Write for the same key must be called sequentially.
type Store struct {
	mtx           sync.Mutex
	cache         map[string]*cValue
	deleted       map[string]struct{}
	unsortedCache map[string]struct{}
	sortedCache   *dbm.MemDB // always ascending sorted
	parent        types.KVStore
}

var _ types.CacheKVStore = (*Store)(nil)

// NewStore creates a new Store object.
func NewStore(parent types.KVStore) *Store {
	return &Store{
		cache:         make(map[string]*cValue),
		deleted:       make(map[string]struct{}),
		unsortedCache: make(map[string]struct{}),
		sortedCache:   dbm.NewMemDB(),
		parent:        parent,
	}
}

// GetStoreType implements Store.
func (store *Store) GetStoreType() types.StoreType {
	return store.parent.GetStoreType()
}

// Get implements types.KVStore.
func (store *Store) Get(key []byte) (value []byte) {
	store.mtx.Lock()
	defer store.mtx.Unlock()

	types.AssertValidKey(key)

	cacheValue, ok := store.cache[conv.UnsafeBytesToStr(key)]
	if !ok {
		value = store.parent.Get(key)
		store.setCacheValue(key, value, false, false)
	} else {
		value = cacheValue.value
	}

	return value
}

// Set implements types.KVStore.
func (store *Store) Set(key []byte, value []byte) {
	store.mtx.Lock()
	defer store.mtx.Unlock()

	types.AssertValidKey(key)
	types.AssertValidValue(value)

	store.setCacheValue(key, value, false, true)
}

// Has implements types.KVStore.
func (store *Store) Has(key []byte) bool {
	value := store.Get(key)
	return value != nil
}

// Delete implements types.KVStore.
func (store *Store) Delete(key []byte) {
	store.mtx.Lock()
	defer store.mtx.Unlock()
	defer telemetry.MeasureSince(time.Now(), "store", "cachekv", "delete")

	types.AssertValidKey(key)
	store.setCacheValue(key, nil, true, true)
}
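// Illustrative sketch (not part of the original file): typical use of Store as
// a discardable write buffer over a parent store. The parent argument and the
// helper name exampleBranchAndFlush are hypothetical; only NewStore, Get, Set,
// Delete and Write above are real.
//
//	func exampleBranchAndFlush(parent types.KVStore) {
//		branch := NewStore(parent)
//		branch.Set([]byte("k"), []byte("v")) // buffered; parent is untouched
//		_ = branch.Get([]byte("k"))          // served from the cache
//		branch.Write()                       // flushes dirty entries to parent
//	}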
// Write implements types.CacheKVStore.
func (store *Store) Write() {
	store.mtx.Lock()
	defer store.mtx.Unlock()
	defer telemetry.MeasureSince(time.Now(), "store", "cachekv", "write")

	// We need a copy of all of the keys.
	// Not the best, but probably not a bottleneck depending.
	keys := make([]string, 0, len(store.cache))

	for key, dbValue := range store.cache {
		if dbValue.dirty {
			keys = append(keys, key)
		}
	}

	sort.Strings(keys)

	// TODO: Consider allowing usage of Batch, which would allow the write to
	// at least happen atomically.
	for _, key := range keys {
		if store.isDeleted(key) {
			// We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot
			// be sure if the underlying store might do a save with the byteslice or
			// not. Once we get confirmation that .Delete is guaranteed not to
			// save the byteslice, then we can assume only a read-only copy is sufficient.
			store.parent.Delete([]byte(key))
			continue
		}

		cacheValue := store.cache[key]
		if cacheValue.value != nil {
			// A nil value with the key absent from store.deleted means the
			// parent never had the key, so there is nothing to write.
			store.parent.Set([]byte(key), cacheValue.value)
		}
	}

	// Clear the cache using the map clearing idiom
	// and not allocating fresh objects.
	// Please see https://bencher.orijtech.com/perfclinic/mapclearing/
	for key := range store.cache {
		delete(store.cache, key)
	}
	for key := range store.deleted {
		delete(store.deleted, key)
	}
	for key := range store.unsortedCache {
		delete(store.unsortedCache, key)
	}
	store.sortedCache = dbm.NewMemDB()
}

// CacheWrap implements CacheWrapper.
func (store *Store) CacheWrap() types.CacheWrap {
	return NewStore(store)
}

// CacheWrapWithTrace implements the CacheWrapper interface.
func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return NewStore(tracekv.NewStore(store, w, tc))
}

// CacheWrapWithListeners implements the CacheWrapper interface.
func (store *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
	return NewStore(listenkv.NewStore(store, storeKey, listeners))
}

//----------------------------------------
// Iteration

// Iterator implements types.KVStore.
func (store *Store) Iterator(start, end []byte) types.Iterator {
	return store.iterator(start, end, true)
}

// ReverseIterator implements types.KVStore.
func (store *Store) ReverseIterator(start, end []byte) types.Iterator {
	return store.iterator(start, end, false)
}

func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator {
	store.mtx.Lock()
	defer store.mtx.Unlock()

	var parent, cache types.Iterator

	if ascending {
		parent = store.parent.Iterator(start, end)
	} else {
		parent = store.parent.ReverseIterator(start, end)
	}

	store.dirtyItems(start, end)
	cache = newMemIterator(start, end, store.sortedCache, store.deleted, ascending)

	return newCacheMergeIterator(parent, cache, ascending)
}

func findStartIndex(strL []string, startQ string) int {
	// Modified binary search to find the very first element >= startQ.
	if len(strL) == 0 {
		return -1
	}

	var left, right, mid int
	right = len(strL) - 1
	for left <= right {
		mid = (left + right) >> 1
		midStr := strL[mid]
		if midStr == startQ {
			// Handle the condition where there might be multiple values equal to
			// startQ. We are looking for the very last value < midStr, so that
			// i+1 is the first element >= startQ.
			for i := mid - 1; i >= 0; i-- {
				if strL[i] != midStr {
					return i + 1
				}
			}
			return 0
		}
		if midStr < startQ {
			left = mid + 1
		} else { // midStr > startQ
			right = mid - 1
		}
	}
	if left >= 0 && left < len(strL) && strL[left] >= startQ {
		return left
	}
	return -1
}
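// Illustrative sketch (not part of the original file): expected results of
// findStartIndex on a sorted slice, including the duplicate-run case the inner
// loop above handles. The values are hypothetical.
//
//	keys := []string{"a", "b", "b", "c"}
//	_ = findStartIndex(keys, "b")  // 1: first index with keys[i] >= "b"
//	_ = findStartIndex(keys, "bb") // 3: "c" is the first element >= "bb"
//	_ = findStartIndex(keys, "d")  // -1: no element >= "d"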
func findEndIndex(strL []string, endQ string) int {
	if len(strL) == 0 {
		return -1
	}

	// Modified binary search: return the index of the first element equal to
	// endQ if present, otherwise the index of the last element < endQ.
	var left, right, mid int
	right = len(strL) - 1
	for left <= right {
		mid = (left + right) >> 1
		midStr := strL[mid]
		if midStr == endQ {
			// Handle the condition where there might be multiple values equal to
			// endQ. We are looking for the very last value < midStr, so that
			// i+1 is the first element equal to endQ.
			for i := mid - 1; i >= 0; i-- {
				if strL[i] < midStr {
					return i + 1
				}
			}
			return 0
		}
		if midStr < endQ {
			left = mid + 1
		} else { // midStr > endQ
			right = mid - 1
		}
	}

	// Binary search failed, now let's find a value less than endQ.
	for i := right; i >= 0; i-- {
		if strL[i] < endQ {
			return i
		}
	}

	return -1
}

type sortState int

const (
	stateUnsorted sortState = iota
	stateAlreadySorted
)

const minSortSize = 1024
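// Illustrative sketch (not part of the original file): expected results of
// findEndIndex, mirroring the findStartIndex example above. The values are
// hypothetical.
//
//	keys := []string{"a", "b", "b", "c"}
//	_ = findEndIndex(keys, "b") // 1: first index of the run equal to "b"
//	_ = findEndIndex(keys, "d") // 3: "c" is the last element < "d"
//	_ = findEndIndex(keys, "0") // -1: no element < "0"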
// dirtyItems constructs a slice of dirty items to use with the memIterator.
func (store *Store) dirtyItems(start, end []byte) {
	startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end)
	if startStr > endStr {
		// Nothing to do here.
		return
	}

	n := len(store.unsortedCache)
	unsorted := make([]*kv.Pair, 0)
	// If the unsortedCache is too big, it costs too much to determine
	// what's in the subset we are concerned about.
	// If you are interleaving iterator calls with writes, this can easily become an
	// O(N^2) overhead.
	// Even without that, too many range checks eventually become more expensive
	// than just not having the cache.
	if n < minSortSize {
		for key := range store.unsortedCache {
			if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
				cacheValue := store.cache[key]
				unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
			}
		}
		store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
		return
	}

	// Otherwise it is large, so perform a modified binary search to find
	// the target ranges for the keys that we should be looking for.
	strL := make([]string, 0, n)
	for key := range store.unsortedCache {
		strL = append(strL, key)
	}
	sort.Strings(strL)

	startIndex, endIndex := findStartEndIndex(strL, startStr, endStr)

	// Since we spent cycles to sort the values, we should process and remove a
	// reasonable amount: ensure that [startIndex, endIndex] spans at least
	// minSortSize entries, expanding the range to cover additional values if it
	// is smaller. This amortizes the cost of processing elements across
	// multiple calls.
	if endIndex-startIndex < minSortSize {
		endIndex = math.MinInt(startIndex+minSortSize, len(strL)-1)
		if endIndex-startIndex < minSortSize {
			startIndex = math.MaxInt(endIndex-minSortSize, 0)
		}
	}

	kvL := make([]*kv.Pair, 0)
	for i := startIndex; i <= endIndex; i++ {
		key := strL[i]
		cacheValue := store.cache[key]
		kvL = append(kvL, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
	}

	// kvL was already sorted, so pass it in as is.
	store.clearUnsortedCacheSubset(kvL, stateAlreadySorted)
}

func findStartEndIndex(strL []string, startStr, endStr string) (int, int) {
	// Now find the values within the domain
	// [start, end)
	startIndex := findStartIndex(strL, startStr)
	endIndex := findEndIndex(strL, endStr)

	if endIndex < 0 {
		endIndex = len(strL) - 1
	}
	if startIndex < 0 {
		startIndex = 0
	}
	return startIndex, endIndex
}

func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) {
	store.deleteKeysFromUnsortedCache(unsorted)

	if sortState == stateUnsorted {
		sort.Slice(unsorted, func(i, j int) bool {
			return bytes.Compare(unsorted[i].Key, unsorted[j].Key) < 0
		})
	}

	for _, item := range unsorted {
		if item.Value == nil {
			// Deleted element, tracked by store.deleted; set an arbitrary
			// placeholder value so the key still shows up in sortedCache.
			// TODO: Don't ignore this error.
			store.sortedCache.Set(item.Key, []byte{})
			continue
		}
		err := store.sortedCache.Set(item.Key, item.Value)
		if err != nil {
			panic(err)
		}
	}
}

func (store *Store) deleteKeysFromUnsortedCache(unsorted []*kv.Pair) {
	n := len(store.unsortedCache)
	if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map.
		for key := range store.unsortedCache {
			delete(store.unsortedCache, key)
		}
	} else { // Otherwise, normally delete the unsorted keys from the map.
		for _, kv := range unsorted {
			delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key))
		}
	}
}

//----------------------------------------
// etc

// setCacheValue is the only entrypoint that mutates store.cache.
func (store *Store) setCacheValue(key, value []byte, deleted bool, dirty bool) {
	types.AssertValidKey(key)

	keyStr := conv.UnsafeBytesToStr(key)
	store.cache[keyStr] = &cValue{
		value: value,
		dirty: dirty,
	}
	if deleted {
		store.deleted[keyStr] = struct{}{}
	} else {
		delete(store.deleted, keyStr)
	}
	if dirty {
		store.unsortedCache[keyStr] = struct{}{}
	}
}

func (store *Store) isDeleted(key string) bool {
	_, ok := store.deleted[key]
	return ok
}
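// Illustrative sketch (not part of the original file): nested branching via
// CacheWrap, which (as defined above) simply wraps the store in another Store.
// Each level buffers independently and only Write propagates changes one level
// down. The parent argument and the helper name are hypothetical.
//
//	func exampleNestedBranch(parent types.KVStore) {
//		outer := NewStore(parent)
//		inner := NewStore(outer) // equivalent to outer.CacheWrap()
//		inner.Set([]byte("k"), []byte("v"))
//		inner.Write() // flushes to outer; parent still untouched
//		outer.Write() // flushes to parent
//	}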