github.com/pingcap/badger@v1.5.1-0.20230103063557-828f39b09b6d/table/memtable/table.go

package memtable

import (
	"bytes"
	"sort"
	"sync/atomic"
	"unsafe"

	"github.com/pingcap/badger/table"
	"github.com/pingcap/badger/y"
)

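// Entry is a single versioned key/value pair staged for insertion into the memtable.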
type Entry struct {
	Key   []byte
	Value y.ValueStruct
}

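// EstimateSize returns the approximate memory cost of the entry once inserted,
// including the per-node skiplist overhead (EstimateNodeSize).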
func (e *Entry) EstimateSize() int64 {
	return int64(len(e.Key) + int(e.Value.EncodedSize()) + EstimateNodeSize)
}

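// Table is an in-memory table backed by a skiplist. Writes may be staged in a
// lock-free pending list of batches and merged into the skiplist later.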
type Table struct {
	skl         *skiplist
	id          uint64
	pendingList unsafe.Pointer // *listNode
	compacting  int32
}

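// New creates a memtable with a skiplist arena of arenaSize bytes and the given table ID.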
func New(arenaSize int64, id uint64) *Table {
	return &Table{
		skl: newSkiplist(arenaSize),
		id:  id,
	}
}

func (t *Table) ID() uint64 {
	return t.id
}

func (t *Table) Delete() error {
	t.skl.Delete()
	return nil
}

func (t *Table) Close() error {
	return nil
}

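// Empty reports whether the table holds no data in either the pending list or the skiplist.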
func (t *Table) Empty() bool {
	return atomic.LoadPointer(&t.pendingList) == nil && t.skl.Empty()
}

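// Get looks up key, checking the pending-list batches first (newest batch
// first) and then falling back to the skiplist.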
func (t *Table) Get(key y.Key, keyHash uint64) (y.ValueStruct, error) {
	curr := (*listNode)(atomic.LoadPointer(&t.pendingList))
	for curr != nil {
		if v, ok := curr.get(key); ok {
			return v, nil
		}
		curr = (*listNode)(atomic.LoadPointer(&curr.next))
	}
	return t.skl.Get(key.UserKey, key.Version), nil
}

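// NewIterator returns an iterator over the whole table. If there are pending
// batches, their iterators are merged with the skiplist iterator.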
func (t *Table) NewIterator(reverse bool) y.Iterator {
	var (
		sklItr = t.skl.NewUniIterator(reverse)
		its    []y.Iterator
	)
	curr := (*listNode)(atomic.LoadPointer(&t.pendingList))
	for curr != nil {
		its = append(its, curr.newIterator(reverse))
		curr = (*listNode)(atomic.LoadPointer(&curr.next))
	}

	if len(its) == 0 {
		return sklItr
	}
	its = append(its, sklItr)
	return table.NewMergeIterator(its, reverse)
}

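// Size returns the skiplist's memory size plus the estimated size of all pending-list batches.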
func (t *Table) Size() int64 {
	var sz int64
	curr := (*listNode)(atomic.LoadPointer(&t.pendingList))
	for curr != nil {
		sz += curr.memSize
		curr = (*listNode)(atomic.LoadPointer(&curr.next))
	}
	return t.skl.MemSize() + sz
}

func (t *Table) Smallest() y.Key {
	it := t.NewIterator(false)
	it.Rewind()
	return it.Key()
}

func (t *Table) Biggest() y.Key {
	it := t.NewIterator(true)
	it.Rewind()
	return it.Key()
}

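// HasOverlap reports whether the table contains any key in the range
// [start, end) (or [start, end] when includeEnd is true).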
func (t *Table) HasOverlap(start, end y.Key, includeEnd bool) bool {
	it := t.NewIterator(false)
	it.Seek(start.UserKey)
	if !it.Valid() {
		return false
	}
	if cmp := it.Key().Compare(end); cmp > 0 {
		return false
	} else if cmp == 0 {
		return includeEnd
	}
	return true
}

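// MarkCompacting sets or clears the compacting flag.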
func (t *Table) MarkCompacting(flag bool) {
	if flag {
		atomic.StoreInt32(&t.compacting, 1)
		return
	}
	atomic.StoreInt32(&t.compacting, 0)
}

func (t *Table) IsCompacting() bool {
	return atomic.LoadInt32(&t.compacting) == 1
}

// PutToSkl inserts the entry directly into the skiplist.
func (t *Table) PutToSkl(key []byte, v y.ValueStruct) {
	t.skl.Put(key, v)
}

// PutToPendingList puts entries into the pending list; call MergeListToSkl to
// merge them into the skiplist later.
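//
// A minimal usage sketch (assuming entries is already sorted in y.Key order,
// i.e. ascending by user key and descending by version, as listNode expects):
//
//	tbl.PutToPendingList(entries)
//	// ... later, typically from a background goroutine:
//	tbl.MergeListToSkl()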
func (t *Table) PutToPendingList(entries []Entry) {
	t.putToList(entries)
}

// MergeListToSkl merges all entries in the pending list into the skiplist.
func (t *Table) MergeListToSkl() {
	head := (*listNode)(atomic.LoadPointer(&t.pendingList))
	if head == nil {
		return
	}

	head.mergeToSkl(t.skl)
	// No new node was inserted concurrently; just clear the head of the list.
	if atomic.CompareAndSwapPointer(&t.pendingList, unsafe.Pointer(head), nil) {
		return
	}
	// A new node was inserted concurrently; iterate to find the predecessor of
	// the old head and unlink everything from the old head onwards.
	curr := (*listNode)(atomic.LoadPointer(&t.pendingList))
	for curr != nil {
		next := atomic.LoadPointer(&curr.next)
		if unsafe.Pointer(head) == next {
			atomic.StorePointer(&curr.next, nil)
			return
		}
		curr = (*listNode)(next)
	}
}

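// putToList prepends a new batch node to the pending list using a lock-free
// compare-and-swap loop.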
func (t *Table) putToList(entries []Entry) {
	n := newListNode(entries)
	for {
		old := atomic.LoadPointer(&t.pendingList)
		n.next = old
		if atomic.CompareAndSwapPointer(&t.pendingList, old, unsafe.Pointer(n)) {
			return
		}
	}
}

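// listNode holds one batch of entries, sorted in key order. latestOffs records,
// for each distinct user key, the offset of its first entry in the batch, so the
// batch can be binary-searched by key while keeping all stored versions.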
type listNode struct {
	next       unsafe.Pointer // *listNode
	entries    []Entry
	latestOffs []uint32
	memSize    int64
}

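// newListNode builds a node from sorted entries, recording the start offset of
// each distinct user key and accumulating the estimated memory size.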
func newListNode(entries []Entry) *listNode {
	n := &listNode{entries: entries, latestOffs: make([]uint32, 0, len(entries))}
	var curKey []byte
	for i, e := range n.entries {
		sz := e.EstimateSize()
		n.memSize += sz
		if !bytes.Equal(e.Key, curKey) {
			n.latestOffs = append(n.latestOffs, uint32(i))
			curKey = e.Key
		}
	}
	return n
}

func (n *listNode) putToSkl(s *skiplist, entries []Entry) {
	var h hint
	for _, e := range entries {
		s.PutWithHint(e.Key, e.Value, &h)
	}
}

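// mergeToSkl merges the older nodes first (by recursing into next) so that
// newer batches overwrite older values in the skiplist, then detaches the tail
// and merges this node's own entries.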
func (n *listNode) mergeToSkl(skl *skiplist) {
	next := (*listNode)(atomic.LoadPointer(&n.next))
	if next != nil {
		next.mergeToSkl(skl)
	}
	atomic.StorePointer(&n.next, nil)
	n.putToSkl(skl, n.entries)
}

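// get binary-searches the batch for the newest entry of key.UserKey that is
// visible at key.Version.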
func (n *listNode) get(key y.Key) (y.ValueStruct, bool) {
	i := sort.Search(len(n.entries), func(i int) bool {
		e := n.entries[i]
		return y.KeyWithTs(e.Key, e.Value.Version).Compare(key) >= 0
	})
	if i < len(n.entries) && bytes.Equal(key.UserKey, n.entries[i].Key) {
		return n.entries[i].Value, true
	}
	return y.ValueStruct{}, false
}

func (n *listNode) newIterator(reverse bool) *listNodeIterator {
	return &listNodeIterator{reversed: reverse, n: n}
}

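// listNodeIterator iterates over a listNode. idx indexes latestOffs (i.e. the
// distinct user keys), while verIdx selects among the versions of the current key.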
type listNodeIterator struct {
	idx      int
	verIdx   uint32
	n        *listNode
	reversed bool
}

func (it *listNodeIterator) Next() {
	if !it.reversed {
		it.idx++
		it.verIdx = 0
	} else {
		it.idx--
		it.verIdx = 0
	}
}

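// NextVersion advances to the next stored version of the current key, if any,
// and reports whether it moved.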
func (it *listNodeIterator) NextVersion() bool {
	nextKeyEntryOff := uint32(len(it.n.entries))
	if it.idx+1 < len(it.n.latestOffs) {
		nextKeyEntryOff = it.n.latestOffs[it.idx+1]
	}
	if it.getEntryIdx()+1 < nextKeyEntryOff {
		it.verIdx++
		return true
	}
	return false
}

func (it *listNodeIterator) getEntryIdx() uint32 {
	return it.n.latestOffs[it.idx] + it.verIdx
}

func (it *listNodeIterator) Rewind() {
	if !it.reversed {
		it.idx = 0
		it.verIdx = 0
	} else {
		it.idx = len(it.n.latestOffs) - 1
		it.verIdx = 0
	}
}

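// Seek positions the iterator at the first key >= key (or, when reversed, the
// last key <= key).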
func (it *listNodeIterator) Seek(key []byte) {
	it.idx = sort.Search(len(it.n.latestOffs), func(i int) bool {
		e := &it.n.entries[it.n.latestOffs[i]]
		return bytes.Compare(e.Key, key) >= 0
	})
	it.verIdx = 0
	if it.reversed {
		if !it.Valid() || !bytes.Equal(it.Key().UserKey, key) {
			it.idx--
		}
	}
}

func (it *listNodeIterator) Key() y.Key {
	e := &it.n.entries[it.getEntryIdx()]
	return y.KeyWithTs(e.Key, e.Value.Version)
}

// Value returns the value at the current position. Like Key, it must index
// entries through getEntryIdx; idx indexes latestOffs, not entries.
func (it *listNodeIterator) Value() y.ValueStruct { return it.n.entries[it.getEntryIdx()].Value }

func (it *listNodeIterator) FillValue(vs *y.ValueStruct) { *vs = it.Value() }

func (it *listNodeIterator) Valid() bool { return it.idx >= 0 && it.idx < len(it.n.latestOffs) }

func (it *listNodeIterator) Close() error { return nil }