github.com/zuoyebang/bitalosdb@v1.1.1-0.20240516111551-79a8c4d8ce20/mem_table.go

// Copyright 2021 The Bitalosdb author(hustxrb@163.com) and other contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bitalosdb

import (
	"bytes"
	"fmt"
	"os"
	"sync/atomic"

	"github.com/cockroachdb/errors"

	"github.com/zuoyebang/bitalosdb/internal/arenaskl"
	"github.com/zuoyebang/bitalosdb/internal/base"
	"github.com/zuoyebang/bitalosdb/internal/consts"
)

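// memTableEntrySize returns the number of arena bytes consumed by an entry
// with the given key and value lengths. The extra 8 key bytes account for the
// internal key trailer (sequence number and kind) stored after the user key.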
func memTableEntrySize(keyBytes, valueBytes int) uint64 {
	return uint64(arenaskl.MaxNodeSize(uint32(keyBytes)+8, uint32(valueBytes)))
}

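// memTableEmptySize is the arena space consumed by an empty skiplist, measured
// once at init time against a throwaway arena. It is subtracted when reporting
// in-use bytes and used by empty to detect whether any entries have been added.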
var memTableEmptySize = func() uint32 {
	var pointSkl arenaskl.Skiplist
	arena := arenaskl.NewArena(make([]byte, 16<<10))
	pointSkl.Reset(arena, bytes.Compare)
	return arena.Size()
}()

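// memTable is an in-memory write buffer backed by an arena skiplist. Space for
// a batch is reserved via prepare and the batch contents are inserted via
// apply. The memtable stays mutable while writer references are held; once the
// last reference is dropped (writerUnref returning true) it is considered
// ready to flush.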
type memTable struct {
	cmp        Compare
	equal      Equal
	arenaBuf   []byte
	skl        arenaskl.Skiplist
	reserved   uint32
	writerRefs atomic.Int32
	logSeqNum  uint64
	logger     base.Logger
	size       uint32
	delCnt     atomic.Uint32
	totalCnt   atomic.Uint32
}

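// memTableOptions bundles the DB-wide Options with per-memtable settings used
// by newMemTable: an optional preallocated arena buffer, the arena size (zero
// means Options.MemTableSize), and the sequence number at which the memtable
// was created.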
type memTableOptions struct {
	*Options
	arenaBuf  []byte
	size      int
	logSeqNum uint64
}

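// checkMemTable asserts that a memTable's arena buffer has been released,
// printing the offending buffer address to stderr and aborting the process if
// it has not.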
func checkMemTable(obj interface{}) {
	m := obj.(*memTable)
	if m.arenaBuf != nil {
		fmt.Fprintf(os.Stderr, "%p: memTable buffer was not freed\n", m.arenaBuf)
		os.Exit(1)
	}
}

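// newMemTable constructs a memTable from opts. If no arena buffer is supplied,
// one of opts.size bytes is allocated. The returned memtable holds an initial
// writer reference on behalf of the caller, which must eventually be released
// with writerUnref.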
func newMemTable(opts memTableOptions) *memTable {
	if opts.size == 0 {
		opts.size = opts.MemTableSize
	}

	m := &memTable{
		cmp:       opts.Comparer.Compare,
		equal:     opts.Comparer.Equal,
		arenaBuf:  opts.arenaBuf,
		logSeqNum: opts.logSeqNum,
		logger:    opts.Logger,
		size:      uint32(opts.size),
	}
	m.writerRefs.Store(1)

	if m.arenaBuf == nil {
		m.arenaBuf = make([]byte, opts.size)
	}

	arena := arenaskl.NewArena(m.arenaBuf)
	m.skl.Reset(arena, m.cmp)
	return m
}

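// writerRef acquires a writer reference, keeping the memtable mutable and not
// yet eligible for flushing.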
func (m *memTable) writerRef() {
	m.writerRefs.Add(1)
}

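// writerUnref releases a writer reference. It returns true when the last
// reference is dropped, meaning the memtable may now be flushed; a negative
// count indicates a reference-counting bug and is logged.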
func (m *memTable) writerUnref() bool {
	switch v := m.writerRefs.Add(-1); {
	case v < 0:
		m.logger.Errorf("panic: memTable inconsistent reference count: %d\n", v)
		return false
	case v == 0:
		return true
	default:
		return false
	}
}

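// readyForFlush reports whether all writer references have been released and
// the memtable can be flushed.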
func (m *memTable) readyForFlush() bool {
	return m.writerRefs.Load() == 0
}

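// get looks up key in the skiplist, returning the stored value, whether the
// key was found, and the kind of the matching internal key.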
func (m *memTable) get(key []byte) ([]byte, bool, base.InternalKeyKind) {
	return m.skl.Get(key)
}

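// set inserts a single internal key and value directly into the skiplist,
// bypassing the batch path and the delete/total counters maintained by apply.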
func (m *memTable) set(key InternalKey, value []byte) error {
	return m.skl.Add(key, value)
}

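// prepare reserves arena space for the batch and acquires a writer reference.
// It returns arenaskl.ErrArenaFull when the batch does not fit in the
// remaining space, or errMemExceedDelPercent when checkDelPercent is set and
// the observed delete ratio crosses the configured flush threshold.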
func (m *memTable) prepare(batch *BatchBitower, checkDelPercent bool) error {
	avail := m.availBytes()
	if batch.memTableSize > uint64(avail) {
		return arenaskl.ErrArenaFull
	}

	if checkDelPercent {
		delPercent := m.deletePercent()
		inuseSize := m.inuseBytes()
		if consts.CheckFlushDelPercent(delPercent, inuseSize, uint64(m.size)) {
			m.logger.Infof("memtable delete percent exceeds threshold inuse:%d size:%d delPercent:%.2f", inuseSize, m.size, delPercent)
			return errMemExceedDelPercent
		}
	}

	m.reserved += uint32(batch.memTableSize)
	m.writerRef()
	return nil
}

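// apply inserts the batch contents into the skiplist, assigning consecutive
// sequence numbers starting at seqNum; LogData entries do not consume a
// sequence number. The total and delete counters feeding deletePercent are
// updated along the way.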
func (m *memTable) apply(batch *BatchBitower, seqNum uint64) error {
	if seqNum < m.logSeqNum {
		return errors.Errorf("bitalosdb: batch seqnum %d is less than memtable creation seqnum %d", seqNum, m.logSeqNum)
	}

	var err error
	var ins arenaskl.Inserter

	startSeqNum := seqNum
	for r := batch.Reader(); ; seqNum++ {
		kind, ukey, value, ok := r.Next()
		if !ok {
			break
		}

		switch kind {
		case InternalKeyKindLogData:
			seqNum--
		default:
			ikey := base.MakeInternalKey(ukey, seqNum, kind)
			err = ins.Add(&m.skl, ikey, value)
		}
		if err != nil {
			return err
		}

		m.totalCnt.Add(1)
		if kind == InternalKeyKindDelete {
			m.delCnt.Add(1)
		}
	}
	if seqNum != startSeqNum+uint64(batch.Count()) {
		return errors.Errorf("bitalosdb: inconsistent batch count: %d vs %d", seqNum, startSeqNum+uint64(batch.Count()))
	}
	return nil
}

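// newIter returns an iterator over the memtable constrained to the lower and
// upper bounds from the iterator options.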
func (m *memTable) newIter(o *IterOptions) internalIterator {
	return m.skl.NewIter(o.GetLowerBound(), o.GetUpperBound())
}

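// newFlushIter returns the iterator used while flushing the memtable; the
// number of bytes consumed is reported through bytesFlushed.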
func (m *memTable) newFlushIter(o *IterOptions, bytesFlushed *uint64) internalIterator {
	return m.skl.NewFlushIter(bytesFlushed)
}

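// availBytes returns how many arena bytes remain for new entries. When this is
// the only outstanding writer reference, the reservation is collapsed back to
// the arena's actual size before computing the remainder.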
func (m *memTable) availBytes() uint32 {
	a := m.skl.Arena()
	if m.writerRefs.Load() == 1 {
		m.reserved = a.Size()
	}
	return a.Capacity() - m.reserved
}

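// inuseBytes returns the arena bytes occupied by entries, excluding the fixed
// overhead of an empty skiplist.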
func (m *memTable) inuseBytes() uint64 {
	return uint64(m.skl.Size() - memTableEmptySize)
}

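// totalBytes returns the total capacity of the memtable's arena.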
func (m *memTable) totalBytes() uint64 {
	return uint64(m.skl.Arena().Capacity())
}

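// deletePercent returns the fraction of applied entries that were deletes, or
// 0 if no deletes have been seen.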
func (m *memTable) deletePercent() float64 {
	delCnt := m.delCnt.Load()
	if delCnt == 0 {
		return 0
	}
	return float64(delCnt) / float64(m.totalCnt.Load())
}

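// close is a no-op; releasing the arena buffer is handled outside the memtable
// (see checkMemTable).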
func (m *memTable) close() error {
	return nil
}

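// empty reports whether no entries have been added, i.e. the skiplist still
// occupies no more than the empty-arena overhead.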
func (m *memTable) empty() bool {
	return m.skl.Size() <= memTableEmptySize
}