github.com/nutsdb/nutsdb@v1.0.4/db.go (about)

     1  // Copyright 2019 The nutsdb Author. All rights reserved.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package nutsdb
    16  
    17  import (
    18  	"bytes"
    19  	"errors"
    20  	"fmt"
    21  	"io"
    22  	"log"
    23  	"os"
    24  	"path"
    25  	"path/filepath"
    26  	"runtime"
    27  	"sort"
    28  	"strings"
    29  	"sync"
    30  	"time"
    31  
    32  	"github.com/gofrs/flock"
    33  	"github.com/xujiajun/utils/filesystem"
    34  	"github.com/xujiajun/utils/strconv2"
    35  )
    36  
// ScanNoLimit represents the data scan no limit flag
const ScanNoLimit int = -1

// KvWriteChCapacity is the buffer capacity of the write-request channel
// (db.writeCh) drained by the doWrites goroutine.
const KvWriteChCapacity = 1000

// FLockName is the name of the lock file used to guard the database
// directory against being opened by more than one process.
const FLockName = "nutsdb-flock"
    41  
type (
	// DB represents a collection of buckets that persist on disk.
	DB struct {
		opt                     Options       // the database options
		Index                   *index        // in-memory indexes (btree, list, set, sorted set)
		ActiveFile              *DataFile     // data file currently being appended to
		MaxFileID               int64         // highest data-file id found on disk / in use
		mu                      sync.RWMutex  // guards DB-wide state such as closed (see Close)
		KeyCount                int           // total key number, including expired, deleted, repeated
		closed                  bool          // set under mu by Close; read by IsClose
		isMerging               bool          // true while a merge is running
		fm                      *fileManager  // cache/factory for data-file handles
		flock                   *flock.Flock  // exclusive lock on the database directory
		commitBuffer            *bytes.Buffer // reusable buffer sized by Options.CommitBufferSize (see open)
		mergeStartCh            chan struct{} // signals the merge worker to start a merge
		mergeEndCh              chan error    // carries the merge result back to the requester
		mergeWorkCloseCh        chan struct{} // tells the merge worker to exit (sent from release)
		writeCh                 chan *request // queue of write requests batched by doWrites
		tm                      *ttlManager   // schedules expiration callbacks
		RecordCount             int64         // current valid record count, exclude deleted, repeated
		bm                      *BucketManager // bucket metadata manager
		hintKeyAndRAMIdxModeLru *LRUCache     // lru cache for HintKeyAndRAMIdxMode
	}
)
    66  
// open returns a newly initialized DB object.
//
// Initialization order: build the DB value with all runtime components,
// pre-size the commit buffer, ensure the data directory exists, take an
// exclusive file lock on it (ErrDirLocked if another process owns it),
// replay the bucket WAL, rebuild the in-memory indexes from the data files,
// and finally start the background merge, write-batching and TTL goroutines.
func open(opt Options) (*DB, error) {
	db := &DB{
		MaxFileID:               0,
		opt:                     opt,
		KeyCount:                0,
		closed:                  false,
		Index:                   newIndex(),
		fm:                      newFileManager(opt.RWMode, opt.MaxFdNumsInCache, opt.CleanFdsCacheThreshold, opt.SegmentSize),
		mergeStartCh:            make(chan struct{}),
		mergeEndCh:              make(chan error),
		mergeWorkCloseCh:        make(chan struct{}),
		writeCh:                 make(chan *request, KvWriteChCapacity),
		tm:                      newTTLManager(opt.ExpiredDeleteType),
		hintKeyAndRAMIdxModeLru: NewLruCache(opt.HintKeyAndRAMIdxCacheSize),
	}

	// Reusable buffer for encoding commits, sized from the options.
	db.commitBuffer = createNewBufferWithSize(int(db.opt.CommitBufferSize))

	// Create the data directory on first open.
	if ok := filesystem.PathIsExist(db.opt.Dir); !ok {
		if err := os.MkdirAll(db.opt.Dir, os.ModePerm); err != nil {
			return nil, err
		}
	}

	// Only one process may own the directory at a time.
	fileLock := flock.New(filepath.Join(opt.Dir, FLockName))
	if ok, err := fileLock.TryLock(); err != nil {
		return nil, err
	} else if !ok {
		return nil, ErrDirLocked
	}

	db.flock = fileLock

	if bm, err := NewBucketManager(opt.Dir); err == nil {
		db.bm = bm
	} else {
		return nil, err
	}

	// Replay the bucket WAL so bucket ids/names are known before indexing.
	if err := db.rebuildBucketManager(); err != nil {
		return nil, fmt.Errorf("db.rebuildBucketManager err:%s", err)
	}

	// Rebuild all in-memory indexes from the data files on disk.
	if err := db.buildIndexes(); err != nil {
		return nil, fmt.Errorf("db.buildIndexes error: %s", err)
	}

	// Background workers: merge, batched writes, TTL expiration.
	go db.mergeWorker()
	go db.doWrites()
	go db.tm.run()

	return db, nil
}
   121  
   122  // Open returns a newly initialized DB object with Option.
   123  func Open(options Options, ops ...Option) (*DB, error) {
   124  	opts := &options
   125  	for _, do := range ops {
   126  		do(opts)
   127  	}
   128  	return open(*opts)
   129  }
   130  
   131  // Update executes a function within a managed read/write transaction.
   132  func (db *DB) Update(fn func(tx *Tx) error) error {
   133  	if fn == nil {
   134  		return ErrFn
   135  	}
   136  
   137  	return db.managed(true, fn)
   138  }
   139  
   140  // View executes a function within a managed read-only transaction.
   141  func (db *DB) View(fn func(tx *Tx) error) error {
   142  	if fn == nil {
   143  		return ErrFn
   144  	}
   145  
   146  	return db.managed(false, fn)
   147  }
   148  
   149  // Backup copies the database to file directory at the given dir.
   150  func (db *DB) Backup(dir string) error {
   151  	return db.View(func(tx *Tx) error {
   152  		return filesystem.CopyDir(db.opt.Dir, dir)
   153  	})
   154  }
   155  
   156  // BackupTarGZ Backup copy the database to writer.
   157  func (db *DB) BackupTarGZ(w io.Writer) error {
   158  	return db.View(func(tx *Tx) error {
   159  		return tarGZCompress(w, db.opt.Dir)
   160  	})
   161  }
   162  
   163  // Close releases all db resources.
   164  func (db *DB) Close() error {
   165  	db.mu.Lock()
   166  	defer db.mu.Unlock()
   167  
   168  	if db.closed {
   169  		return ErrDBClosed
   170  	}
   171  
   172  	db.closed = true
   173  
   174  	err := db.release()
   175  	if err != nil {
   176  		return err
   177  	}
   178  
   179  	return nil
   180  }
   181  
// release tears down all resources owned by the DB; it is called by Close
// after db.closed has been set. The sequence is order-sensitive: release the
// active file's rw-manager, drop the in-memory state, close the fd cache,
// stop the merge worker, and only then give up the directory lock.
func (db *DB) release() error {
	// Snapshot the option up front so the final GC decision is unaffected by
	// the teardown below.
	GCEnable := db.opt.GCWhenClose

	err := db.ActiveFile.rwManager.Release()
	if err != nil {
		return err
	}

	db.Index = nil

	db.ActiveFile = nil

	err = db.fm.close()

	if err != nil {
		return err
	}

	// Unbuffered channel: this blocks until the merge worker receives the
	// shutdown signal.
	db.mergeWorkCloseCh <- struct{}{}

	if !db.flock.Locked() {
		return ErrDirUnlocked
	}

	err = db.flock.Unlock()
	if err != nil {
		return err
	}

	db.fm = nil

	db.tm.close()

	// Optionally force a GC so freed index memory is returned promptly.
	if GCEnable {
		runtime.GC()
	}

	return nil
}
   222  
// getValueByRecord resolves the value bytes for record.
//
// Lookup order:
//  1. the value stored inline on the record itself (present in
//     HintKeyValAndRAMIdxMode);
//  2. the LRU cache, when HintKeyAndRAMIdxCacheSize > 0;
//  3. the on-disk data file identified by record.FileID, read at
//     record.DataPos, after which the entry is added to the cache.
func (db *DB) getValueByRecord(record *Record) ([]byte, error) {
	if record == nil {
		return nil, ErrRecordIsNil
	}

	if record.Value != nil {
		return record.Value, nil
	}

	// firstly we find data in cache
	if db.getHintKeyAndRAMIdxCacheSize() > 0 {
		if value := db.hintKeyAndRAMIdxModeLru.Get(record); value != nil {
			return value.(*Entry).Value, nil
		}
	}

	dirPath := getDataPath(record.FileID, db.opt.Dir)
	df, err := db.fm.getDataFile(dirPath, db.opt.SegmentSize)
	if err != nil {
		return nil, err
	}
	// Release the file handle on exit; a Release failure is deliberately
	// ignored since the value has already been read at that point.
	defer func(rwManager RWManager) {
		err := rwManager.Release()
		if err != nil {
			return
		}
	}(df.rwManager)

	// Payload size = key length + value size recorded in the index.
	payloadSize := int64(len(record.Key)) + int64(record.ValueSize)
	item, err := df.ReadEntry(int(record.DataPos), payloadSize)
	if err != nil {
		return nil, fmt.Errorf("read err. pos %d, key %s, err %s", record.DataPos, record.Key, err)
	}

	// saved in cache
	if db.getHintKeyAndRAMIdxCacheSize() > 0 {
		db.hintKeyAndRAMIdxModeLru.Add(record, item)
	}

	return item.Value, nil
}
   264  
// commitTransaction commits tx on behalf of the write loop, rolling it back
// when Commit returns an error or panics so one bad transaction cannot wedge
// the doWrites goroutine.
//
// NOTE(review): the return value is not named, so the deferred
// `err = errRollback` assignment cannot change what the caller sees, and a
// recovered panic makes this function return nil. Rollback failures and
// panics are therefore silently swallowed — confirm whether that is intended
// (propagating them would make doWrites log.Fatal).
func (db *DB) commitTransaction(tx *Tx) error {
	var err error
	defer func() {
		var panicked bool
		if r := recover(); r != nil {
			// resume normal execution
			panicked = true
		}
		if panicked || err != nil {
			// log.Fatal("panicked=", panicked, ", err=", err)
			if errRollback := tx.Rollback(); errRollback != nil {
				err = errRollback
			}
		}
	}()

	// commit current tx
	// NOTE(review): tx.lock() has no matching unlock in this function —
	// presumably Commit/Rollback release it; confirm in the Tx implementation.
	tx.lock()
	tx.setStatusRunning()
	err = tx.Commit()
	if err != nil {
		// log.Fatal("txCommit fail,err=", err)
		return err
	}

	return err
}
   292  
   293  func (db *DB) writeRequests(reqs []*request) error {
   294  	var err error
   295  	if len(reqs) == 0 {
   296  		return nil
   297  	}
   298  
   299  	done := func(err error) {
   300  		for _, r := range reqs {
   301  			r.Err = err
   302  			r.Wg.Done()
   303  		}
   304  	}
   305  
   306  	for _, req := range reqs {
   307  		tx := req.tx
   308  		cerr := db.commitTransaction(tx)
   309  		if cerr != nil {
   310  			err = cerr
   311  		}
   312  	}
   313  
   314  	done(err)
   315  	return err
   316  }
   317  
// getMaxBatchCount returns the maximum number of entries allowed in a batch
// (Options.MaxBatchCount).
func (db *DB) getMaxBatchCount() int64 {
	return db.opt.MaxBatchCount
}
   322  
// getMaxBatchSize returns the maximum total size allowed for a batch
// (Options.MaxBatchSize).
func (db *DB) getMaxBatchSize() int64 {
	return db.opt.MaxBatchSize
}
   327  
// getMaxWriteRecordCount returns the configured cap on writable records
// (Options.MaxWriteRecordCount).
func (db *DB) getMaxWriteRecordCount() int64 {
	return db.opt.MaxWriteRecordCount
}
   331  
// getHintKeyAndRAMIdxCacheSize returns the configured LRU cache size for
// HintKeyAndRAMIdxMode; a value <= 0 disables the cache.
func (db *DB) getHintKeyAndRAMIdxCacheSize() int {
	return db.opt.HintKeyAndRAMIdxCacheSize
}
   335  
// doWrites is the background write loop: it drains db.writeCh, batches the
// requests, and commits each batch via writeRequests.
//
// pendingCh (capacity 1) is a token limiting in-flight batch writes to one:
// a batch is only handed off after the token is acquired, and writeRequests
// returns it when done. A batch is flushed either when it reaches
// 3*KvWriteChCapacity requests or as soon as the token becomes available.
func (db *DB) doWrites() {
	pendingCh := make(chan struct{}, 1)
	writeRequests := func(reqs []*request) {
		if err := db.writeRequests(reqs); err != nil {
			// Fatal by design: a failed batch commit cannot be retried here.
			log.Fatal("writeRequests fail, err=", err)
		}
		<-pendingCh // return the in-flight token
	}

	reqs := make([]*request, 0, 10)
	var r *request
	var ok bool
	for {
		r, ok = <-db.writeCh
		if !ok {
			goto closedCase
		}

		for {
			reqs = append(reqs, r)

			// Hard cap on batch size: block for the token, then flush.
			if len(reqs) >= 3*KvWriteChCapacity {
				pendingCh <- struct{}{} // blocking.
				goto writeCase
			}

			select {
			// Either push to pending, or continue to pick from writeCh.
			case r, ok = <-db.writeCh:
				if !ok {
					goto closedCase
				}
			case pendingCh <- struct{}{}:
				goto writeCase
			}
		}

	closedCase:
		// All the pending request are drained.
		// Don't close the writeCh, because it has be used in several places.
		for {
			select {
			case r = <-db.writeCh:
				reqs = append(reqs, r)
			default:
				pendingCh <- struct{}{} // Push to pending before doing write.
				writeRequests(reqs)
				return
			}
		}

	writeCase:
		// Commit the batch asynchronously; the token caps concurrency at one.
		go writeRequests(reqs)
		reqs = make([]*request, 0, 10)
	}
}
   392  
   393  // setActiveFile sets the ActiveFile (DataFile object).
   394  func (db *DB) setActiveFile() (err error) {
   395  	activeFilePath := getDataPath(db.MaxFileID, db.opt.Dir)
   396  	db.ActiveFile, err = db.fm.getDataFile(activeFilePath, db.opt.SegmentSize)
   397  	if err != nil {
   398  		return
   399  	}
   400  
   401  	db.ActiveFile.fileID = db.MaxFileID
   402  
   403  	return nil
   404  }
   405  
   406  // getMaxFileIDAndFileIds returns max fileId and fileIds.
   407  func (db *DB) getMaxFileIDAndFileIDs() (maxFileID int64, dataFileIds []int) {
   408  	files, _ := os.ReadDir(db.opt.Dir)
   409  
   410  	if len(files) == 0 {
   411  		return 0, nil
   412  	}
   413  
   414  	for _, file := range files {
   415  		filename := file.Name()
   416  		fileSuffix := path.Ext(path.Base(filename))
   417  		if fileSuffix != DataSuffix {
   418  			continue
   419  		}
   420  
   421  		filename = strings.TrimSuffix(filename, DataSuffix)
   422  		id, _ := strconv2.StrToInt(filename)
   423  		dataFileIds = append(dataFileIds, id)
   424  	}
   425  
   426  	if len(dataFileIds) == 0 {
   427  		return 0, nil
   428  	}
   429  
   430  	sort.Ints(dataFileIds)
   431  	maxFileID = int64(dataFileIds[len(dataFileIds)-1])
   432  
   433  	return
   434  }
   435  
// parseDataFiles replays every data file in dataFileIds (in the given,
// ascending order) and rebuilds the in-memory indexes from the entries
// found. Entries are grouped by transaction and a group is only applied
// once its Committed marker entry is seen; unfinished groups are discarded.
// Afterwards db.RecordCount is recomputed from the rebuilt indexes.
func (db *DB) parseDataFiles(dataFileIds []int) (err error) {
	var (
		off      int64         // read offset within the current file
		f        *fileRecovery // reader over the current file
		fID      int64         // id of the current file
		dataInTx dataInTx      // entries buffered for the in-progress transaction
	)

	// parseDataInTx applies every buffered entry of a committed transaction
	// to the in-memory indexes.
	parseDataInTx := func() error {
		for _, entry := range dataInTx.es {
			// If the bucket no longer exists in the bucket manager it was
			// deleted by a later record in the bucket WAL, so entries that
			// reference it can simply be skipped.
			bucketId := entry.Meta.BucketId
			if _, err := db.bm.GetBucketById(bucketId); errors.Is(err, ErrBucketNotExist) {
				continue
			}

			record := db.createRecordByModeWithFidAndOff(entry.fid, uint64(entry.off), &entry.Entry)

			if err = db.buildIdxes(record, &entry.Entry); err != nil {
				return err
			}

			db.KeyCount++

		}
		return nil
	}

	// readEntriesFromFile scans the current file entry by entry until EOF or
	// another recognizable end-of-data condition.
	readEntriesFromFile := func() error {
		for {
			entry, err := f.readEntry(off)
			if err != nil {
				// whatever which logic branch it will choose, we will release the fd.
				_ = f.release()
				// These errors mark the end of readable data, not corruption.
				if errors.Is(err, io.EOF) || errors.Is(err, ErrIndexOutOfBound) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, ErrEntryZero) || errors.Is(err, ErrHeaderSizeOutOfBounds) {
					break
				}
				if off >= db.opt.SegmentSize {
					break
				}

				return err
			}

			if entry == nil {
				break
			}

			entryWhenRecovery := &EntryWhenRecovery{
				Entry: *entry,
				fid:   fID,
				off:   off,
			}
			// Start a new transaction group, or extend the current one.
			if dataInTx.txId == 0 {
				dataInTx.appendEntry(entryWhenRecovery)
				dataInTx.txId = entry.Meta.TxID
				dataInTx.startOff = off
			} else if dataInTx.isSameTx(entryWhenRecovery) {
				dataInTx.appendEntry(entryWhenRecovery)
			}

			// A Committed marker makes the whole buffered group durable.
			if entry.Meta.Status == Committed {
				err := parseDataInTx()
				if err != nil {
					return err
				}
				dataInTx.reset()
				dataInTx.startOff = off
			}

			// An entry from a different transaction discards the unfinished
			// (uncommitted) group.
			if !dataInTx.isSameTx(entryWhenRecovery) {
				dataInTx.reset()
				dataInTx.startOff = off
			}

			off += entry.Size()
		}

		// The max-id file stays active: record where future appends resume.
		if fID == db.MaxFileID {
			db.ActiveFile.ActualSize = off
			db.ActiveFile.writeOff = off
		}

		return nil
	}

	for _, dataID := range dataFileIds {
		off = 0
		fID = int64(dataID)
		dataPath := getDataPath(fID, db.opt.Dir)
		f, err = newFileRecovery(dataPath, db.opt.BufferSizeOfRecovery)
		if err != nil {
			return err
		}
		err := readEntriesFromFile()
		if err != nil {
			return err
		}
	}

	// compute the valid record count and save it in db.RecordCount
	db.RecordCount, err = db.getRecordCount()
	return
}
   542  
// getRecordCount sums the number of live records across all in-memory
// indexes (btree, list, set, sorted set). Its result is stored into
// db.RecordCount after recovery. On error, the partial sum accumulated so
// far is returned alongside the error.
func (db *DB) getRecordCount() (int64, error) {
	var res int64

	// Iterate through the BTree indices
	for _, btree := range db.Index.bTree.idx {
		res += int64(btree.Count())
	}

	// Iterate through the List indices
	for _, listItem := range db.Index.list.idx {
		for key := range listItem.Items {
			curLen, err := listItem.Size(key)
			if err != nil {
				return res, err
			}
			res += int64(curLen)
		}
	}

	// Iterate through the Set indices
	for _, setItem := range db.Index.set.idx {
		for key := range setItem.M {
			res += int64(setItem.SCard(key))
		}
	}

	// Iterate through the SortedSet indices
	for _, zsetItem := range db.Index.sortedSet.idx {
		for key := range zsetItem.M {
			curLen, err := zsetItem.ZCard(key)
			if err != nil {
				return res, err
			}
			res += int64(curLen)
		}
	}

	return res, nil
}
   582  
// buildBTreeIdx replays one btree entry into the in-memory btree index.
// Expired or delete-flagged entries remove the key and cancel any TTL timer;
// live entries are inserted, registering an expiration callback when they
// carry a non-persistent TTL.
func (db *DB) buildBTreeIdx(record *Record, entry *Entry) error {
	key, meta := entry.Key, entry.Meta

	bucket, err := db.bm.GetBucketById(meta.BucketId)
	if err != nil {
		return err
	}
	bucketId := bucket.Id

	bTree := db.Index.bTree.getWithDefault(bucketId)

	if record.IsExpired() || meta.Flag == DataDeleteFlag {
		db.tm.del(bucketId, string(key))
		bTree.Delete(key)
	} else {
		if meta.TTL != Persistent {
			// Schedule deletion at the entry's absolute expiry time.
			db.tm.add(bucketId, string(key), db.expireTime(meta.Timestamp, meta.TTL), db.buildExpireCallback(bucket.Name, key))
		} else {
			// A persistent overwrite cancels any previously scheduled expiry.
			db.tm.del(bucketId, string(key))
		}
		bTree.Insert(record)
	}
	return nil
}
   607  
   608  func (db *DB) expireTime(timestamp uint64, ttl uint32) time.Duration {
   609  	now := time.UnixMilli(time.Now().UnixMilli())
   610  	expireTime := time.UnixMilli(int64(timestamp))
   611  	expireTime = expireTime.Add(time.Duration(int64(ttl)) * time.Second)
   612  	return expireTime.Sub(now)
   613  }
   614  
   615  func (db *DB) buildIdxes(record *Record, entry *Entry) error {
   616  	meta := entry.Meta
   617  	switch meta.Ds {
   618  	case DataStructureBTree:
   619  		return db.buildBTreeIdx(record, entry)
   620  	case DataStructureList:
   621  		if err := db.buildListIdx(record, entry); err != nil {
   622  			return err
   623  		}
   624  	case DataStructureSet:
   625  		if err := db.buildSetIdx(record, entry); err != nil {
   626  			return err
   627  		}
   628  	case DataStructureSortedSet:
   629  		if err := db.buildSortedSetIdx(record, entry); err != nil {
   630  			return err
   631  		}
   632  	default:
   633  		panic(fmt.Sprintf("there is an unexpected data structure that is unimplemented in our database.:%d", meta.Ds))
   634  	}
   635  	return nil
   636  }
   637  
   638  func (db *DB) deleteBucket(ds uint16, bucket BucketId) {
   639  	if ds == DataStructureSet {
   640  		db.Index.set.delete(bucket)
   641  	}
   642  	if ds == DataStructureSortedSet {
   643  		db.Index.sortedSet.delete(bucket)
   644  	}
   645  	if ds == DataStructureBTree {
   646  		db.Index.bTree.delete(bucket)
   647  	}
   648  	if ds == DataStructureList {
   649  		db.Index.list.delete(bucket)
   650  	}
   651  }
   652  
   653  // buildSetIdx builds set index when opening the DB.
   654  func (db *DB) buildSetIdx(record *Record, entry *Entry) error {
   655  	key, val, meta := entry.Key, entry.Value, entry.Meta
   656  
   657  	bucket, err := db.bm.GetBucketById(entry.Meta.BucketId)
   658  	if err != nil {
   659  		return err
   660  	}
   661  	bucketId := bucket.Id
   662  
   663  	s := db.Index.set.getWithDefault(bucketId)
   664  
   665  	switch meta.Flag {
   666  	case DataSetFlag:
   667  		if err := s.SAdd(string(key), [][]byte{val}, []*Record{record}); err != nil {
   668  			return fmt.Errorf("when build SetIdx SAdd index err: %s", err)
   669  		}
   670  	case DataDeleteFlag:
   671  		if err := s.SRem(string(key), val); err != nil {
   672  			return fmt.Errorf("when build SetIdx SRem index err: %s", err)
   673  		}
   674  	}
   675  
   676  	return nil
   677  }
   678  
// buildSortedSetIdx replays one sorted-set entry into the in-memory sorted
// set index while opening the DB. For ZAdd entries the score is encoded in
// the key as "<key><SeparatorForZSetKey><score>"; keys that do not split
// into exactly two parts are silently ignored. ErrSortedSetNotFound from a
// remove/pop replay is tolerated (the set may never have been built).
func (db *DB) buildSortedSetIdx(record *Record, entry *Entry) error {
	key, val, meta := entry.Key, entry.Value, entry.Meta

	bucket, err := db.bm.GetBucketById(entry.Meta.BucketId)
	if err != nil {
		return err
	}
	bucketId := bucket.Id

	ss := db.Index.sortedSet.getWithDefault(bucketId, db)

	switch meta.Flag {
	case DataZAddFlag:
		keyAndScore := strings.Split(string(key), SeparatorForZSetKey)
		if len(keyAndScore) == 2 {
			key := keyAndScore[0]
			score, _ := strconv2.StrToFloat64(keyAndScore[1])
			err = ss.ZAdd(key, SCORE(score), val, record)
		}
	case DataZRemFlag:
		_, err = ss.ZRem(string(key), val)
	case DataZRemRangeByRankFlag:
		// val encodes "<start><sep><end>" for the rank range to remove.
		start, end := splitIntIntStr(string(val), SeparatorForZSetKey)
		err = ss.ZRemRangeByRank(string(key), start, end)
	case DataZPopMaxFlag:
		_, _, err = ss.ZPopMax(string(key))
	case DataZPopMinFlag:
		_, _, err = ss.ZPopMin(string(key))
	}

	// We don't need to panic if sorted set is not found.
	if err != nil && !errors.Is(err, ErrSortedSetNotFound) {
		return fmt.Errorf("when build sortedSetIdx err: %s", err)
	}

	return nil
}
   717  
// buildListIdx replays one list entry into the in-memory list index while
// opening the DB. Entries whose TTL has already elapsed are skipped
// entirely; otherwise the entry's flag selects which list operation to
// re-apply.
func (db *DB) buildListIdx(record *Record, entry *Entry) error {
	key, val, meta := entry.Key, entry.Value, entry.Meta

	bucket, err := db.bm.GetBucketById(entry.Meta.BucketId)
	if err != nil {
		return err
	}
	bucketId := bucket.Id

	l := db.Index.list.getWithDefault(bucketId)

	// Expired list entries are not replayed at all.
	if IsExpired(meta.TTL, meta.Timestamp) {
		return nil
	}

	switch meta.Flag {
	case DataExpireListFlag:
		// val carries the TTL (seconds) applied to the whole list key.
		t, _ := strconv2.StrToInt64(string(val))
		ttl := uint32(t)
		l.TTL[string(key)] = ttl
		l.TimeStamp[string(key)] = meta.Timestamp
	case DataLPushFlag:
		err = l.LPush(string(key), record)
	case DataRPushFlag:
		err = l.RPush(string(key), record)
	case DataLRemFlag:
		err = db.buildListLRemIdx(val, l, key)
	case DataLPopFlag:
		_, err = l.LPop(string(key))
	case DataRPopFlag:
		_, err = l.RPop(string(key))
	case DataLTrimFlag:
		// key encodes "<listKey><sep><start>"; val carries the end index.
		newKey, start := splitStringIntStr(string(key), SeparatorForListKey)
		end, _ := strconv2.StrToInt(string(val))
		err = l.LTrim(newKey, start, end)
	case DataLRemByIndex:
		indexes, _ := UnmarshalInts(val)
		err = l.LRemByIndex(string(key), indexes)
	}

	if err != nil {
		return fmt.Errorf("when build listIdx err: %s", err)
	}

	return nil
}
   765  
   766  func (db *DB) buildListLRemIdx(value []byte, l *List, key []byte) error {
   767  	count, newValue := splitIntStringStr(string(value), SeparatorForListKey)
   768  
   769  	return l.LRem(string(key), count, func(r *Record) (bool, error) {
   770  		v, err := db.getValueByRecord(r)
   771  		if err != nil {
   772  			return false, err
   773  		}
   774  		return bytes.Equal([]byte(newValue), v), nil
   775  	})
   776  }
   777  
   778  // buildIndexes builds indexes when db initialize resource.
   779  func (db *DB) buildIndexes() (err error) {
   780  	var (
   781  		maxFileID   int64
   782  		dataFileIds []int
   783  	)
   784  
   785  	maxFileID, dataFileIds = db.getMaxFileIDAndFileIDs()
   786  
   787  	// init db.ActiveFile
   788  	db.MaxFileID = maxFileID
   789  
   790  	// set ActiveFile
   791  	if err = db.setActiveFile(); err != nil {
   792  		return
   793  	}
   794  
   795  	if dataFileIds == nil && maxFileID == 0 {
   796  		return
   797  	}
   798  
   799  	// build hint index
   800  	return db.parseDataFiles(dataFileIds)
   801  }
   802  
   803  func (db *DB) createRecordByModeWithFidAndOff(fid int64, off uint64, entry *Entry) *Record {
   804  	record := NewRecord()
   805  
   806  	record.WithKey(entry.Key).
   807  		WithTimestamp(entry.Meta.Timestamp).
   808  		WithTTL(entry.Meta.TTL).
   809  		WithTxID(entry.Meta.TxID)
   810  
   811  	if db.opt.EntryIdxMode == HintKeyValAndRAMIdxMode {
   812  		record.WithValue(entry.Value)
   813  	}
   814  
   815  	if db.opt.EntryIdxMode == HintKeyAndRAMIdxMode {
   816  		record.WithFileId(fid).
   817  			WithDataPos(off).
   818  			WithValueSize(uint32(len(entry.Value)))
   819  	}
   820  
   821  	return record
   822  }
   823  
// managed calls a block of code that is fully contained in a transaction.
// When fn returns nil the transaction is committed; when it returns an
// error the configured ErrorHandler (if any) sees the error first and the
// transaction is rolled back, with any rollback failure folded into the
// returned error.
//
// NOTE(review): if fn panics, the deferred recover converts the panic into
// an error but neither Commit nor Rollback runs for tx — confirm whether
// the transaction is cleaned up elsewhere in that case.
func (db *DB) managed(writable bool, fn func(tx *Tx) error) (err error) {
	var tx *Tx

	tx, err = db.Begin(writable)
	if err != nil {
		return err
	}
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic when executing tx, err is %+v", r)
		}
	}()

	if err = fn(tx); err == nil {
		err = tx.Commit()
	} else {
		if db.opt.ErrorHandler != nil {
			db.opt.ErrorHandler.HandleError(err)
		}

		if errRollback := tx.Rollback(); errRollback != nil {
			err = fmt.Errorf("%v. Rollback err: %v", err, errRollback)
		}
	}

	return err
}
   852  
// sendToWriteCh wraps tx in a pooled request and queues it on db.writeCh
// for the doWrites goroutine. The caller must wait on the returned
// request's Wg and then inspect req.Err to learn the commit outcome.
// The error result is always nil today; it is kept for interface stability.
func (db *DB) sendToWriteCh(tx *Tx) (*request, error) {
	req := requestPool.Get().(*request)
	req.reset()
	// Wg is completed by writeRequests once the batch has been committed.
	req.Wg.Add(1)
	req.tx = tx
	req.IncrRef()     // for db write
	db.writeCh <- req // Handled in doWrites.
	return req, nil
}
   862  
   863  func (db *DB) checkListExpired() {
   864  	db.Index.list.rangeIdx(func(l *List) {
   865  		for key := range l.TTL {
   866  			l.IsExpire(key)
   867  		}
   868  	})
   869  }
   870  
// IsClose reports whether the DB has been closed.
//
// NOTE(review): db.closed is written under db.mu (see Close) but read here
// without any lock, so this read is racy under the Go memory model —
// confirm whether callers only need a best-effort answer before adding
// synchronization.
func (db *DB) IsClose() bool {
	return db.closed
}
   875  
// buildExpireCallback returns the function the ttlManager runs when
// (bucket, key) expires: it deletes the key inside a fresh Update
// transaction, but only if the TTL entry is still registered — the key may
// have been rewritten or persisted after the timer was armed. Errors are
// logged, not propagated (there is no caller to return them to).
func (db *DB) buildExpireCallback(bucket string, key []byte) func() {
	return func() {
		err := db.Update(func(tx *Tx) error {
			b, err := tx.db.bm.GetBucket(DataStructureBTree, bucket)
			if err != nil {
				return err
			}
			bucketId := b.Id
			// Only delete while the TTL entry still exists; otherwise this
			// callback is stale.
			if db.tm.exist(bucketId, string(key)) {
				return tx.Delete(bucket, key)
			}
			return nil
		})
		if err != nil {
			log.Printf("occur error when expired deletion, error: %v", err.Error())
		}
	}
}
   894  
   895  func (db *DB) rebuildBucketManager() error {
   896  	bucketFilePath := db.opt.Dir + "/" + BucketStoreFileName
   897  	f, err := newFileRecovery(bucketFilePath, db.opt.BufferSizeOfRecovery)
   898  	if err != nil {
   899  		return nil
   900  	}
   901  	bucketRequest := make([]*bucketSubmitRequest, 0)
   902  
   903  	for {
   904  		bucket, err := f.readBucket()
   905  		if err != nil {
   906  			// whatever which logic branch it will choose, we will release the fd.
   907  			_ = f.release()
   908  			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
   909  				break
   910  			} else {
   911  				return err
   912  			}
   913  		}
   914  		bucketRequest = append(bucketRequest, &bucketSubmitRequest{
   915  			ds:     bucket.Ds,
   916  			name:   BucketName(bucket.Name),
   917  			bucket: bucket,
   918  		})
   919  	}
   920  
   921  	if len(bucketRequest) > 0 {
   922  		err = db.bm.SubmitPendingBucketChange(bucketRequest)
   923  		if err != nil {
   924  			return err
   925  		}
   926  	}
   927  	return nil
   928  }