github.com/MetalBlockchain/metalgo@v1.11.9/database/prefixdb/db.go

// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package prefixdb

import (
	"context"
	"slices"
	"sync"

	"github.com/MetalBlockchain/metalgo/database"
	"github.com/MetalBlockchain/metalgo/utils"
	"github.com/MetalBlockchain/metalgo/utils/hashing"
)

var (
	_ database.Database = (*Database)(nil)
	_ database.Batch    = (*batch)(nil)
	_ database.Iterator = (*iterator)(nil)
)

// Database partitions a database into a sub-database by prefixing all keys with
// a unique value.
type Database struct {
	// All keys in this db begin with this byte slice
	dbPrefix []byte
	// Lexically one greater than dbPrefix, defining the end of this db's key range
	dbLimit    []byte
	bufferPool *utils.BytesPool

	// lock needs to be held during Close to guarantee the db will not be
	// marked closed concurrently with another operation. All other operations
	// can hold RLock.
	lock sync.RWMutex
	// The underlying storage
	db     database.Database
	closed bool
}

func newDB(prefix []byte, db database.Database) *Database {
	return &Database{
		dbPrefix:   prefix,
		dbLimit:    incrementByteSlice(prefix),
		db:         db,
		bufferPool: utils.NewBytesPool(),
	}
}

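// incrementByteSlice returns a copy of [orig], treated as a big-endian
// integer, incremented by one with carry. For example, {0x01, 0xff} becomes
// {0x02, 0x00}; an input of all 0xff bytes wraps around to all zero bytes.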
func incrementByteSlice(orig []byte) []byte {
	n := len(orig)
	buf := make([]byte, n)
	copy(buf, orig)
	for i := n - 1; i >= 0; i-- {
		buf[i]++
		if buf[i] != 0 {
			break
		}
	}
	return buf
}

// New returns a new prefixed database
func New(prefix []byte, db database.Database) *Database {
	if prefixDB, ok := db.(*Database); ok {
		return newDB(
			JoinPrefixes(prefixDB.dbPrefix, prefix),
			prefixDB.db,
		)
	}
	return newDB(
		MakePrefix(prefix),
		db,
	)
}
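
// Usage sketch (illustrative only): building a prefixed view over a base
// database. This assumes the in-memory backend at
// github.com/MetalBlockchain/metalgo/database/memdb.
//
//	base := memdb.New()
//	userDB := prefixdb.New([]byte("user"), base)
//	_ = userDB.Put([]byte("alice"), []byte{0x01})
//	// The entry is stored in [base] under hash("user") + "alice", so it is
//	// visible through [userDB] but not through views built with a different
//	// prefix.
//	has, _ := userDB.Has([]byte("alice")) // true
//	_ = has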

// NewNested returns a new prefixed database without attempting to compress
// prefixes.
func NewNested(prefix []byte, db database.Database) *Database {
	return newDB(
		MakePrefix(prefix),
		db,
	)
}

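// MakePrefix converts a raw prefix into the fixed-length value that is
// prepended to keys by hashing it with hashing.ComputeHash256.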
func MakePrefix(prefix []byte) []byte {
	return hashing.ComputeHash256(prefix)
}

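// JoinPrefixes collapses two levels of prefixing into one by hashing the
// concatenation of [firstPrefix] and [secondPrefix].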
func JoinPrefixes(firstPrefix, secondPrefix []byte) []byte {
	simplePrefix := make([]byte, len(firstPrefix)+len(secondPrefix))
	copy(simplePrefix, firstPrefix)
	copy(simplePrefix[len(firstPrefix):], secondPrefix)
	return MakePrefix(simplePrefix)
}

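// PrefixKey returns a new slice containing [prefix] immediately followed by
// [key].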
func PrefixKey(prefix, key []byte) []byte {
	prefixedKey := make([]byte, len(prefix)+len(key))
	copy(prefixedKey, prefix)
	copy(prefixedKey[len(prefix):], key)
	return prefixedKey
}

func (db *Database) Has(key []byte) (bool, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return false, database.ErrClosed
	}
	prefixedKey := db.prefix(key)
	defer db.bufferPool.Put(prefixedKey)

	return db.db.Has(*prefixedKey)
}

func (db *Database) Get(key []byte) ([]byte, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return nil, database.ErrClosed
	}
	prefixedKey := db.prefix(key)
	defer db.bufferPool.Put(prefixedKey)

	return db.db.Get(*prefixedKey)
}

func (db *Database) Put(key, value []byte) error {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return database.ErrClosed
	}
	prefixedKey := db.prefix(key)
	defer db.bufferPool.Put(prefixedKey)

	return db.db.Put(*prefixedKey, value)
}

func (db *Database) Delete(key []byte) error {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return database.ErrClosed
	}
	prefixedKey := db.prefix(key)
	defer db.bufferPool.Put(prefixedKey)

	return db.db.Delete(*prefixedKey)
}

func (db *Database) NewBatch() database.Batch {
	return &batch{
		Batch: db.db.NewBatch(),
		db:    db,
	}
}
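
// Batch usage sketch (illustrative only): operations are buffered with the
// prefixed keys and committed to the underlying database by Write.
//
//	b := db.NewBatch()
//	if err := b.Put([]byte("k"), []byte("v")); err != nil {
//		return err
//	}
//	if err := b.Delete([]byte("stale")); err != nil {
//		return err
//	}
//	if err := b.Write(); err != nil {
//		return err
//	}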

func (db *Database) NewIterator() database.Iterator {
	return db.NewIteratorWithStartAndPrefix(nil, nil)
}

func (db *Database) NewIteratorWithStart(start []byte) database.Iterator {
	return db.NewIteratorWithStartAndPrefix(start, nil)
}

func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator {
	return db.NewIteratorWithStartAndPrefix(nil, prefix)
}

// Assumes it is safe to modify the arguments to db.db.NewIteratorWithStartAndPrefix after it returns.
// It is safe to modify [start] and [prefix] after this method returns.
func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return &database.IteratorError{
			Err: database.ErrClosed,
		}
	}

	prefixedStart := db.prefix(start)
	defer db.bufferPool.Put(prefixedStart)

	prefixedPrefix := db.prefix(prefix)
	defer db.bufferPool.Put(prefixedPrefix)

	return &iterator{
		Iterator: db.db.NewIteratorWithStartAndPrefix(*prefixedStart, *prefixedPrefix),
		db:       db,
	}
}
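
// Iteration sketch (illustrative only): keys returned by the iterator have
// this db's prefix already stripped.
//
//	it := db.NewIteratorWithPrefix([]byte("user:"))
//	defer it.Release()
//	for it.Next() {
//		key, value := it.Key(), it.Value()
//		_, _ = key, value
//	}
//	if err := it.Error(); err != nil {
//		return err
//	}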

func (db *Database) Compact(start, limit []byte) error {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return database.ErrClosed
	}

	prefixedStart := db.prefix(start)
	defer db.bufferPool.Put(prefixedStart)

	if limit == nil {
		return db.db.Compact(*prefixedStart, db.dbLimit)
	}
	prefixedLimit := db.prefix(limit)
	defer db.bufferPool.Put(prefixedLimit)

	return db.db.Compact(*prefixedStart, *prefixedLimit)
}

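// Close marks this prefixed database as closed. It does not close the
// underlying database.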
func (db *Database) Close() error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.closed {
		return database.ErrClosed
	}
	db.closed = true
	return nil
}

func (db *Database) isClosed() bool {
	db.lock.RLock()
	defer db.lock.RUnlock()

	return db.closed
}

func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return nil, database.ErrClosed
	}
	return db.db.HealthCheck(ctx)
}

// Return a copy of [key], prepended with this db's prefix.
// The returned slice should be put back in the pool when it's done being used.
func (db *Database) prefix(key []byte) *[]byte {
	keyLen := len(db.dbPrefix) + len(key)
	prefixedKey := db.bufferPool.Get(keyLen)
	copy(*prefixedKey, db.dbPrefix)
	copy((*prefixedKey)[len(db.dbPrefix):], key)
	return prefixedKey
}

// Batch of database operations
type batch struct {
	database.Batch
	db *Database

	// Each key is prepended with the database's prefix.
	// Each byte slice underlying a key should be returned to the pool
	// when this batch is reset.
	ops []batchOp
}

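// batchOp records a single Put or Delete so the batch can be replayed and so
// the pooled key buffer can be returned to the pool when the batch is reset.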
type batchOp struct {
	Key    *[]byte
	Value  []byte
	Delete bool
}

func (b *batch) Put(key, value []byte) error {
	prefixedKey := b.db.prefix(key)
	copiedValue := slices.Clone(value)
	b.ops = append(b.ops, batchOp{
		Key:   prefixedKey,
		Value: copiedValue,
	})
	return b.Batch.Put(*prefixedKey, copiedValue)
}

func (b *batch) Delete(key []byte) error {
	prefixedKey := b.db.prefix(key)
	b.ops = append(b.ops, batchOp{
		Key:    prefixedKey,
		Delete: true,
	})
	return b.Batch.Delete(*prefixedKey)
}

// Write flushes any accumulated data to the underlying database.
func (b *batch) Write() error {
	b.db.lock.RLock()
	defer b.db.lock.RUnlock()

	if b.db.closed {
		return database.ErrClosed
	}
	return b.Batch.Write()
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	// Return the byte buffers underlying each key to the pool.
	// The value slices are not pooled, because batch.Replay assumes it is not
	// safe to modify the value argument passed to w.Put.
	for _, op := range b.ops {
		b.db.bufferPool.Put(op.Key)
	}

	// Clear b.ops
	if cap(b.ops) > len(b.ops)*database.MaxExcessCapacityFactor {
		b.ops = make([]batchOp, 0, cap(b.ops)/database.CapacityReductionFactor)
	} else {
		b.ops = b.ops[:0]
	}
	b.Batch.Reset()
}

// Replay the batch contents.
func (b *batch) Replay(w database.KeyValueWriterDeleter) error {
	for _, op := range b.ops {
		keyWithoutPrefix := (*op.Key)[len(b.db.dbPrefix):]
		if op.Delete {
			if err := w.Delete(keyWithoutPrefix); err != nil {
				return err
			}
		} else {
			if err := w.Put(keyWithoutPrefix, op.Value); err != nil {
				return err
			}
		}
	}
	return nil
}

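// iterator wraps an iterator over the underlying database and strips this
// db's prefix from the keys it returns.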
type iterator struct {
	database.Iterator
	db *Database

	key, val []byte
	err      error
}

// Next calls the inner iterator's Next() function and strips the key's prefix.
func (it *iterator) Next() bool {
	if it.db.isClosed() {
		it.key = nil
		it.val = nil
		it.err = database.ErrClosed
		return false
	}

	hasNext := it.Iterator.Next()
	if hasNext {
		key := it.Iterator.Key()
		if prefixLen := len(it.db.dbPrefix); len(key) >= prefixLen {
			key = key[prefixLen:]
		}
		it.key = key
		it.val = it.Iterator.Value()
	} else {
		it.key = nil
		it.val = nil
	}

	return hasNext
}

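// Key returns the current key with this db's prefix stripped.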
func (it *iterator) Key() []byte {
	return it.key
}

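// Value returns the value of the current key/value pair.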
func (it *iterator) Value() []byte {
	return it.val
}

// Error returns [database.ErrClosed] if the underlying db was closed;
// otherwise it returns the inner iterator's error.
func (it *iterator) Error() error {
	if it.err != nil {
		return it.err
	}
	return it.Iterator.Error()
}