github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/ledger/complete/mtrie/trieCache.go

package mtrie

import (
	"sync"

	"github.com/onflow/flow-go/ledger"
	"github.com/onflow/flow-go/ledger/complete/mtrie/trie"
)

type OnTreeEvictedFunc func(tree *trie.MTrie)

// TrieCache caches tries in memory. It acts as a FIFO queue:
// when it reaches capacity, it evicts the oldest trie from the cache.
//
// Under the hood it uses a circular buffer of mtrie pointers
// and a map from root hash to cache index for fast lookup.
type TrieCache struct {
	tries         []*trie.MTrie
	lookup        map[ledger.RootHash]int // index to item
	lock          sync.RWMutex
	capacity      int
	tail          int // element index to write to
	count         int // number of elements (count <= capacity)
	onTreeEvicted OnTreeEvictedFunc
}

// NewTrieCache returns a new TrieCache with given capacity.
func NewTrieCache(capacity uint, onTreeEvicted OnTreeEvictedFunc) *TrieCache {
	return &TrieCache{
		tries:         make([]*trie.MTrie, capacity),
		lookup:        make(map[ledger.RootHash]int, capacity),
		lock:          sync.RWMutex{},
		capacity:      int(capacity),
		tail:          0,
		count:         0,
		onTreeEvicted: onTreeEvicted,
	}
}

// Purge removes all mtries stored in the buffer
func (tc *TrieCache) Purge() {
	tc.lock.Lock()
	defer tc.lock.Unlock()

	if tc.count == 0 {
		return
	}

	toEvict := 0
	for i := 0; i < tc.capacity; i++ {
		toEvict = (tc.tail + i) % tc.capacity
		if tc.onTreeEvicted != nil {
			if tc.tries[toEvict] != nil {
				tc.onTreeEvicted(tc.tries[toEvict])
			}
		}
		tc.tries[toEvict] = nil
	}
	tc.tail = 0
	tc.count = 0
	tc.lookup = make(map[ledger.RootHash]int, tc.capacity)
}

// Tries returns elements in queue, starting from the oldest element
// to the newest element.
func (tc *TrieCache) Tries() []*trie.MTrie {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	if tc.count == 0 {
		return nil
	}

	tries := make([]*trie.MTrie, tc.count)

	if tc.tail >= tc.count { // Data isn't wrapped around the slice.
		head := tc.tail - tc.count
		copy(tries, tc.tries[head:tc.tail])
	} else { // tc.tail < tc.count, data is wrapped around the slice.
		// This happens once the cache is full and Push has started
		// overwriting the oldest element.
		head := tc.capacity - tc.count + tc.tail
		n := copy(tries, tc.tries[head:])
		copy(tries[n:], tc.tries[:tc.tail])
	}

	return tries
}

// Push pushes trie to queue. If queue is full, it overwrites the oldest element.
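// For example (illustrative, capacity 3): pushing t1, t2, t3 fills the buffer
// as [t1 t2 t3] with tail wrapped back to index 0; pushing t4 then evicts t1
// and leaves [t4 t2 t3] with tail at index 1, so Tries() returns [t2, t3, t4],
// oldest to newest.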
func (tc *TrieCache) Push(t *trie.MTrie) {
	tc.lock.Lock()
	defer tc.lock.Unlock()

	// if the cache is full, evict the oldest trie before overwriting its slot
	if tc.count == tc.capacity {
		oldtrie := tc.tries[tc.tail]
		if tc.onTreeEvicted != nil {
			tc.onTreeEvicted(oldtrie)
		}
		delete(tc.lookup, oldtrie.RootHash())
		tc.count-- // so that the increment at the end of the method doesn't go beyond capacity
	}
	tc.tries[tc.tail] = t
	tc.lookup[t.RootHash()] = tc.tail
	tc.tail = (tc.tail + 1) % tc.capacity
	tc.count++
}

// LastAddedTrie returns the last trie added to the cache
func (tc *TrieCache) LastAddedTrie() *trie.MTrie {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	if tc.count == 0 {
		return nil
	}
	indx := tc.tail - 1
	if indx < 0 {
		indx = tc.capacity - 1
	}
	return tc.tries[indx]
}

// Get returns the trie with the given rootHash; if it does not exist, it returns nil and false.
func (tc *TrieCache) Get(rootHash ledger.RootHash) (*trie.MTrie, bool) {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	idx, found := tc.lookup[rootHash]
	if !found {
		return nil, false
	}
	return tc.tries[idx], true
}

// Count returns the number of items stored in the cache
func (tc *TrieCache) Count() int {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	return tc.count
}
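// Usage sketch (illustrative only): how the cache is typically exercised
// from another package, i.e. Push, Get by root hash, and LastAddedTrie.
// It assumes trie.NewEmptyMTrie() from this package's trie sub-package
// returns an empty *trie.MTrie; treat that constructor as an assumption
// rather than verified API of this exact version.
//
//	cache := mtrie.NewTrieCache(100, func(evicted *trie.MTrie) {
//		// release any resources held by the evicted trie
//	})
//
//	empty := trie.NewEmptyMTrie() // assumed constructor for an empty trie
//	cache.Push(empty)
//
//	if t, ok := cache.Get(empty.RootHash()); ok {
//		_ = t // trie found by its root hash
//	}
//	_ = cache.LastAddedTrie() // the most recently pushed trie (here: empty)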