github.com/zuoyebang/bitalosdb@v1.1.1-0.20240516111551-79a8c4d8ce20/internal/cache/lfucache/mem_table.go

// Copyright 2021 The Bitalosdb author(hustxrb@163.com) and other contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lfucache

import (
	"fmt"
	"os"
	"sync/atomic"

	arenaskl2 "github.com/zuoyebang/bitalosdb/internal/cache/lfucache/internal/arenaskl"
	base2 "github.com/zuoyebang/bitalosdb/internal/cache/lfucache/internal/base"
	"github.com/zuoyebang/bitalosdb/internal/manual"
)

func memTableEntrySize(keyBytes, valueBytes int) uint64 {
	return uint64(arenaskl2.MaxNodeSize(uint32(keyBytes)+8, uint32(valueBytes)))
}

var memTableEmptySize = func() uint32 {
	var pointSkl arenaskl2.Skiplist
	arena := arenaskl2.NewArena(make([]byte, 16<<10))
	pointSkl.Reset(arena)
	return arena.Size()
}()

type memTable struct {
	id         int64
	arenaBuf   []byte
	skl        arenaskl2.Skiplist
	size       int
	reserved   uint32
	writerRefs int32
	num        int
}

func checkMemTable(obj interface{}) {
	m := obj.(*memTable)
	if m.arenaBuf != nil {
		fmt.Fprintf(os.Stderr, "%p: memTable(%d) buffer was not freed\n", m.arenaBuf, m.id)
		os.Exit(1)
	}
}

func newMemTable(id int64, size int) *memTable {
	m := &memTable{
		id:         id,
		arenaBuf:   manual.New(size),
		size:       size,
		writerRefs: 1,
	}

	m.skl.Reset(arenaskl2.NewArena(m.arenaBuf))

	return m
}

func (m *memTable) writerRef() {
	switch v := atomic.AddInt32(&m.writerRefs, 1); {
	case v <= 1:
		panic(fmt.Sprintf("mcache: inconsistent reference count: %d", v))
	}
}

func (m *memTable) writerUnref() bool {
	switch v := atomic.AddInt32(&m.writerRefs, -1); {
	case v < 0:
		panic(fmt.Sprintf("mcache: inconsistent reference count: %d", v))
	case v == 0:
		return true
	default:
		return false
	}
}

func (m *memTable) readyForFlush() bool {
	return atomic.LoadInt32(&m.writerRefs) == 0
}

func (m *memTable) prepare(kvSize uint64) error {
	avail := m.availBytes()
	if kvSize > uint64(avail) {
		return arenaskl2.ErrArenaFull
	}
	m.reserved += uint32(kvSize)
	return nil
}

func (m *memTable) add(key []byte, value []byte, seqNum uint64, kind internalKeyKind) (err error) {
	ikey := base2.MakeInternalKey(key, seqNum, kind)
	var ins arenaskl2.Inserter
	err = ins.Add(&m.skl, ikey, value)
	if err == nil {
		m.num++
	}

	return err
}

func (m *memTable) get(key []byte) ([]byte, bool, internalKeyKind) {
	return m.skl.Get(key)
}

func (m *memTable) newIter(o *iterOptions) internalIterator {
	return m.skl.NewIter(o.GetLowerBound(), o.GetUpperBound())
}

func (m *memTable) newFlushIter(o *iterOptions, bytesFlushed *uint64) internalIterator {
	return m.skl.NewFlushIter(bytesFlushed)
}

func (m *memTable) availBytes() uint32 {
	a := m.skl.Arena()
	if atomic.LoadInt32(&m.writerRefs) == 1 {
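		// With only a single writer reference outstanding, no concurrent
		// prepare() reservation can be in flight, so reserved can be synced
		// down to the arena's actual allocation, dropping the over-estimation
		// accumulated via memTableEntrySize.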
		m.reserved = a.Size()
	}
	return a.Capacity() - m.reserved
}

func (m *memTable) inuseBytes() uint64 {
	return uint64(m.skl.Size())
}

func (m *memTable) totalBytes() uint64 {
	return uint64(m.skl.Arena().Capacity())
}

func (m *memTable) close() error {
	return nil
}

func (m *memTable) empty() bool {
	return m.skl.Size() == memTableEmptySize
}

func (m *memTable) getID() int64 {
	return m.id
}

func (m *memTable) count() int {
	return m.num
}
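
// Usage sketch (illustrative, not part of the original file): the intended
// write path, assuming a set-kind constant such as internalKeyKindSet is
// defined elsewhere in this package alongside internalKeyKind. prepare
// reserves a conservative upper bound computed by memTableEntrySize; add
// then performs the actual skiplist insert, which can still return
// arenaskl2.ErrArenaFull once the arena is exhausted. The arena buffer is
// manually allocated, so it must be freed elsewhere before checkMemTable runs.
//
//	mt := newMemTable(1, 64<<10)
//	key, val := []byte("k"), []byte("v")
//	if err := mt.prepare(memTableEntrySize(len(key), len(val))); err == nil {
//		_ = mt.add(key, val, 1, internalKeyKindSet)
//	}
//	if mt.writerUnref() {
//		// Last writer reference dropped; the table is now flushable.
//		_ = mt.readyForFlush()
//	}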