github.com/MetalBlockchain/metalgo@v1.11.9/database/pebbledb/db.go

// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package pebbledb

import (
	"context"
	"encoding/json"
	"errors"
	"slices"
	"sync"

	"github.com/cockroachdb/pebble"
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"

	"github.com/MetalBlockchain/metalgo/database"
	"github.com/MetalBlockchain/metalgo/utils/logging"
	"github.com/MetalBlockchain/metalgo/utils/set"
	"github.com/MetalBlockchain/metalgo/utils/units"
)

const (
	Name = "pebbledb"

	// pebbleByteOverHead is the number of bytes of constant overhead that
	// should be added to a batch size per operation.
	pebbleByteOverHead = 8

	defaultCacheSize = 512 * units.MiB
)

var (
	_ database.Database = (*Database)(nil)

	errInvalidOperation = errors.New("invalid operation")

	DefaultConfig = Config{
		CacheSize:                   defaultCacheSize,
		BytesPerSync:                512 * units.KiB,
		WALBytesPerSync:             0, // Default to no background syncing.
		MemTableStopWritesThreshold: 8,
		MemTableSize:                defaultCacheSize / 4,
		MaxOpenFiles:                4096,
		MaxConcurrentCompactions:    1,
	}
)

type Database struct {
	lock          sync.RWMutex
	pebbleDB      *pebble.DB
	closed        bool
	openIterators set.Set[*iter]
}

type Config struct {
	CacheSize                   int64  `json:"cacheSize"`
	BytesPerSync                int    `json:"bytesPerSync"`
	WALBytesPerSync             int    `json:"walBytesPerSync"` // 0 means no background syncing
	MemTableStopWritesThreshold int    `json:"memTableStopWritesThreshold"`
	MemTableSize                uint64 `json:"memTableSize"`
	MaxOpenFiles                int    `json:"maxOpenFiles"`
	MaxConcurrentCompactions    int    `json:"maxConcurrentCompactions"`
}
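
// An illustrative config payload for New, derived from the JSON tags above.
// Fields omitted from the payload keep their DefaultConfig values, since New
// unmarshals on top of a copy of DefaultConfig. The numbers shown here mirror
// DefaultConfig (512 MiB cache, 512 KiB per sync, 128 MiB memtable):
//
//	{
//		"cacheSize": 536870912,
//		"bytesPerSync": 524288,
//		"walBytesPerSync": 0,
//		"memTableStopWritesThreshold": 8,
//		"memTableSize": 134217728,
//		"maxOpenFiles": 4096,
//		"maxConcurrentCompactions": 1
//	}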

// TODO: Add metrics
func New(file string, configBytes []byte, log logging.Logger, _ prometheus.Registerer) (database.Database, error) {
	cfg := DefaultConfig
	if len(configBytes) > 0 {
		if err := json.Unmarshal(configBytes, &cfg); err != nil {
			return nil, err
		}
	}

	opts := &pebble.Options{
		Cache:                       pebble.NewCache(cfg.CacheSize),
		BytesPerSync:                cfg.BytesPerSync,
		Comparer:                    pebble.DefaultComparer,
		WALBytesPerSync:             cfg.WALBytesPerSync,
		MemTableStopWritesThreshold: cfg.MemTableStopWritesThreshold,
		MemTableSize:                cfg.MemTableSize,
		MaxOpenFiles:                cfg.MaxOpenFiles,
		MaxConcurrentCompactions:    func() int { return cfg.MaxConcurrentCompactions },
	}
	opts.Experimental.ReadSamplingMultiplier = -1 // Disable seek compaction

	log.Info(
		"opening pebble",
		zap.Reflect("config", cfg),
	)

	db, err := pebble.Open(file, opts)
	return &Database{
		pebbleDB:      db,
		openIterators: set.Set[*iter]{},
	}, err
}

func (db *Database) Close() error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.closed {
		return database.ErrClosed
	}

	db.closed = true

	for iter := range db.openIterators {
		iter.lock.Lock()
		iter.release()
		iter.lock.Unlock()
	}
	db.openIterators.Clear()

	return updateError(db.pebbleDB.Close())
}

func (db *Database) HealthCheck(_ context.Context) (interface{}, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return nil, database.ErrClosed
	}
	return nil, nil
}

func (db *Database) Has(key []byte) (bool, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return false, database.ErrClosed
	}

	_, closer, err := db.pebbleDB.Get(key)
	if err == pebble.ErrNotFound {
		return false, nil
	}
	if err != nil {
		return false, updateError(err)
	}
	return true, closer.Close()
}

func (db *Database) Get(key []byte) ([]byte, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return nil, database.ErrClosed
	}

	data, closer, err := db.pebbleDB.Get(key)
	if err != nil {
		return nil, updateError(err)
	}
	return slices.Clone(data), closer.Close()
}

func (db *Database) Put(key []byte, value []byte) error {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return database.ErrClosed
	}

	return updateError(db.pebbleDB.Set(key, value, pebble.Sync))
}

func (db *Database) Delete(key []byte) error {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return database.ErrClosed
	}

	return updateError(db.pebbleDB.Delete(key, pebble.Sync))
}

func (db *Database) Compact(start []byte, end []byte) error {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.closed {
		return database.ErrClosed
	}

	if end == nil {
		// The database.Database spec treats a nil [limit] as a key after all
		// keys but pebble treats a nil [limit] as a key before all keys in
		// Compact. Use the greatest key in the database as the [limit] to get
		// the desired behavior.
		it, err := db.pebbleDB.NewIter(&pebble.IterOptions{})
		if err != nil {
			return updateError(err)
		}

		if !it.Last() {
			// The database is empty.
			return it.Close()
		}

		end = slices.Clone(it.Key())
		if err := it.Close(); err != nil {
			return err
		}
	}

	if pebble.DefaultComparer.Compare(start, end) >= 1 {
		// pebble requires [start] < [end]
		return nil
	}

	return updateError(db.pebbleDB.Compact(start, end, true /* parallelize */))
}

func (db *Database) NewIterator() database.Iterator {
	return db.NewIteratorWithStartAndPrefix(nil, nil)
}

func (db *Database) NewIteratorWithStart(start []byte) database.Iterator {
	return db.NewIteratorWithStartAndPrefix(start, nil)
}

func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator {
	return db.NewIteratorWithStartAndPrefix(nil, prefix)
}

func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.closed {
		return &iter{
			db:     db,
			closed: true,
			err:    database.ErrClosed,
		}
	}

	it, err := db.pebbleDB.NewIter(keyRange(start, prefix))
	if err != nil {
		return &iter{
			db:     db,
			closed: true,
			err:    updateError(err),
		}
	}

	iter := &iter{
		db:   db,
		iter: it,
	}
	db.openIterators.Add(iter)
	return iter
}
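
// A minimal usage sketch for the iterator constructors above, assuming the
// standard database.Iterator contract (Next, Key, Value, Error, Release) from
// the database package. Release should always be called when iteration is
// done:
//
//	it := db.NewIteratorWithPrefix(prefix)
//	defer it.Release()
//	for it.Next() {
//		key, value := it.Key(), it.Value()
//		_, _ = key, value // process the pair
//	}
//	if err := it.Error(); err != nil {
//		// handle iteration failure
//	}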

// Converts a pebble-specific error to its Avalanche equivalent, if applicable.
func updateError(err error) error {
	switch err {
	case pebble.ErrClosed:
		return database.ErrClosed
	case pebble.ErrNotFound:
		return database.ErrNotFound
	default:
		return err
	}
}

func keyRange(start, prefix []byte) *pebble.IterOptions {
	opt := &pebble.IterOptions{
		LowerBound: prefix,
		UpperBound: prefixToUpperBound(prefix),
	}
	if pebble.DefaultComparer.Compare(start, prefix) == 1 {
		opt.LowerBound = start
	}
	return opt
}

// Returns an upper bound that stops after all keys with the given [prefix].
// Assumes the Database uses bytes.Compare for key comparison and not a custom
// comparer.
func prefixToUpperBound(prefix []byte) []byte {
	for i := len(prefix) - 1; i >= 0; i-- {
		if prefix[i] != 0xFF {
			upperBound := make([]byte, i+1)
			copy(upperBound, prefix)
			upperBound[i]++
			return upperBound
		}
	}
	return nil
}
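
// Worked examples of the bound computation above, assuming bytes.Compare
// ordering: prefixToUpperBound([]byte{0x01, 0xff}) returns []byte{0x02}, and
// keyRange([]byte{0x01, 0x02}, []byte{0x01}) yields the half-open range
// [{0x01, 0x02}, {0x02}) because the start key sorts after the prefix. A
// prefix consisting entirely of 0xff bytes has no finite upper bound, so
// prefixToUpperBound returns nil and the iterator is unbounded above.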