// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

//go:build (arm64 || amd64) && !openbsd

// Package pebble implements the key-value database layer based on pebble.
package pebble

import (
	"bytes"
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/bloom"
	"github.com/tacshi/go-ethereum/common"
	"github.com/tacshi/go-ethereum/ethdb"
	"github.com/tacshi/go-ethereum/log"
	"github.com/tacshi/go-ethereum/metrics"
)

const (
	// minCache is the minimum amount of memory in megabytes to allocate to pebble
	// read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve pebble database
	// compaction, I/O and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)
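
// A minimal usage sketch (hypothetical caller, not part of this package):
// open a store, write a key, read it back and close. The path, cache and
// handle figures are illustrative only.
//
//	db, err := pebble.New("/tmp/chaindata", 512, 512, "eth/db/chaindata/", false)
//	if err != nil {
//		panic(err)
//	}
//	defer db.Close()
//	if err := db.Put([]byte("key"), []byte("value")); err != nil {
//		panic(err)
//	}
//	val, _ := db.Get([]byte("key")) // val is a private copy of the stored value
//	_ = val
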
// Database is a persistent key-value store based on the pebble storage engine.
// Apart from basic data storage functionality it also supports batch writes and
// iterating over the keyspace in binary-alphabetical order.
type Database struct {
	fn string     // filename for reporting
	db *pebble.DB // Underlying pebble storage engine

	compTimeMeter       metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter       metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter      metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter    metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter     metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskSizeGauge       metrics.Gauge // Gauge for tracking the size of all the levels in the database
	diskReadMeter       metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter      metrics.Meter // Meter for measuring the effective amount of data written
	memCompGauge        metrics.Gauge // Gauge for tracking the number of memory compactions
	level0CompGauge     metrics.Gauge // Gauge for tracking the number of table compactions in level 0
	nonlevel0CompGauge  metrics.Gauge // Gauge for tracking the number of table compactions in non-0 levels
	seekCompGauge       metrics.Gauge // Gauge for tracking the number of table compactions caused by read optimizations
	manualMemAllocGauge metrics.Gauge // Gauge for tracking the amount of non-managed memory currently allocated

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path

	activeComp          int       // Current number of active compactions
	compStartTime       time.Time // The start time of the earliest currently-active compaction
	compTime            int64     // Total time spent in compaction in ns
	level0Comp          uint32    // Total number of level-zero compactions
	nonLevel0Comp       uint32    // Total number of non level-zero compactions
	writeDelayStartTime time.Time // The start time of the latest write stall
	writeDelayCount     int64     // Total number of write stall counts
	writeDelayTime      int64     // Total time spent in write stalls
}

func (d *Database) onCompactionBegin(info pebble.CompactionInfo) {
	if d.activeComp == 0 {
		d.compStartTime = time.Now()
	}
	l0 := info.Input[0]
	if l0.Level == 0 {
		atomic.AddUint32(&d.level0Comp, 1)
	} else {
		atomic.AddUint32(&d.nonLevel0Comp, 1)
	}
	d.activeComp++
}

func (d *Database) onCompactionEnd(info pebble.CompactionInfo) {
	if d.activeComp == 1 {
		atomic.AddInt64(&d.compTime, int64(time.Since(d.compStartTime)))
	} else if d.activeComp == 0 {
		panic("should not happen")
	}
	d.activeComp--
}

func (d *Database) onWriteStallBegin(b pebble.WriteStallBeginInfo) {
	d.writeDelayStartTime = time.Now()
}

func (d *Database) onWriteStallEnd() {
	atomic.AddInt64(&d.writeDelayTime, int64(time.Since(d.writeDelayStartTime)))
}
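
// The compaction callbacks above track the wall-clock time during which at
// least one compaction is active, rather than summing overlapping runs. An
// illustrative timeline (hypothetical numbers, for exposition only):
//
//	t0: compaction A begins -> activeComp 0->1, compStartTime = t0
//	t1: compaction B begins -> activeComp 1->2
//	t2: compaction B ends   -> activeComp 2->1, nothing recorded yet
//	t3: compaction A ends   -> activeComp 1->0, compTime += t3 - t0
//
// Note that writeDelayCount is read by the metrics loop below but is never
// incremented in this file, so the write-stall counter meter always reports
// zero deltas; only the stall duration is effectively measured.
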
// New returns a wrapped pebble DB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < minCache {
		cache = minCache
	}
	if handles < minHandles {
		handles = minHandles
	}
	logger := log.New("database", file)
	logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)

	// The max memtable size is limited by the uint32 offsets stored in
	// internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry.
	// Taken from https://github.com/cockroachdb/pebble/blob/master/open.go#L38
	maxMemTableSize := 4<<30 - 1 // Capped by 4 GB

	// Two memory tables are configured, which is identical to leveldb:
	// one frozen memory table and one live one.
	memTableLimit := 2
	memTableSize := cache * 1024 * 1024 / 2 / memTableLimit
	if memTableSize > maxMemTableSize {
		memTableSize = maxMemTableSize
	}
	db := &Database{
		fn:       file,
		log:      logger,
		quitChan: make(chan chan error),
	}
	opt := &pebble.Options{
		// Pebble has a single combined cache area and the write
		// buffers are taken from this too. Assign all available
		// memory allowance for cache.
		Cache:        pebble.NewCache(int64(cache * 1024 * 1024)),
		MaxOpenFiles: handles,

		// The size of a memory table (as well as the write buffer).
		// Note, there may be more than two memory tables in the system.
		MemTableSize: memTableSize,

		// MemTableStopWritesThreshold places a hard limit on the number
		// of existent MemTables (including the frozen one).
		// Note, this must be the number of tables, not the size of all memtables,
		// according to https://github.com/cockroachdb/pebble/blob/master/options.go#L738-L742
		// and to https://github.com/cockroachdb/pebble/blob/master/db.go#L1892-L1903.
		MemTableStopWritesThreshold: memTableLimit,

		// The default compaction concurrency is a single thread;
		// here we use all available CPUs for faster compaction.
		MaxConcurrentCompactions: func() int { return runtime.NumCPU() },

		// Per-level options. Options for at least one level must be specified. The
		// options for the last level are used for all subsequent levels.
		Levels: []pebble.LevelOptions{
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
		},
		ReadOnly: readonly,
		EventListener: &pebble.EventListener{
			CompactionBegin: db.onCompactionBegin,
			CompactionEnd:   db.onCompactionEnd,
			WriteStallBegin: db.onWriteStallBegin,
			WriteStallEnd:   db.onWriteStallEnd,
		},
	}
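
	// Rough effect of the figures above (illustrative arithmetic, not from
	// the original source): bloom.FilterPolicy(10) spends about 10 bits of
	// filter per key, which for a classic bloom filter keeps the false
	// positive rate around 1%, so point reads can skip most tables that
	// cannot contain the key. Likewise, a hypothetical cache of 512 MB would
	// yield 512*1024*1024/2/2 = 128 MiB per memtable under the sizing above.
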
	// Disable seek compaction explicitly. Check https://github.com/tacshi/go-ethereum/pull/20130
	// for more details.
	opt.Experimental.ReadSamplingMultiplier = -1

	// Open the db and recover any potential corruptions
	innerDB, err := pebble.Open(file, opt)
	if err != nil {
		return nil, err
	}
	db.db = innerDB

	db.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	db.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	db.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	db.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
	db.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	db.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	db.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	db.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
	db.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
	db.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
	db.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
	db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)
	db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)

	// Start up the metrics gathering and return
	go db.meter(metricsGatheringInterval)
	return db, nil
}

// Close stops the metrics collection, flushes any pending data to disk and closes
// all I/O accesses to the underlying key-value store.
func (d *Database) Close() error {
	d.quitLock.Lock()
	defer d.quitLock.Unlock()

	if d.quitChan != nil {
		errc := make(chan error)
		d.quitChan <- errc
		if err := <-errc; err != nil {
			d.log.Error("Metrics collection failed", "err", err)
		}
		d.quitChan = nil
	}
	return d.db.Close()
}

// Has retrieves if a key is present in the key-value store.
func (d *Database) Has(key []byte) (bool, error) {
	_, closer, err := d.db.Get(key)
	if err == pebble.ErrNotFound {
		return false, nil
	} else if err != nil {
		return false, err
	}
	closer.Close()
	return true, nil
}

// Get retrieves the given key if it's present in the key-value store.
func (d *Database) Get(key []byte) ([]byte, error) {
	dat, closer, err := d.db.Get(key)
	if err != nil {
		return nil, err
	}
	ret := make([]byte, len(dat))
	copy(ret, dat)
	closer.Close()
	return ret, nil
}

// Put inserts the given value into the key-value store.
func (d *Database) Put(key []byte, value []byte) error {
	return d.db.Set(key, value, pebble.NoSync)
}

// Delete removes the key from the key-value store.
func (d *Database) Delete(key []byte) error {
	return d.db.Delete(key, nil)
}

// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (d *Database) NewBatch() ethdb.Batch {
	return &batch{
		b: d.db.NewBatch(),
	}
}
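
// A minimal batching sketch (hypothetical caller, not part of this package):
// buffer a couple of mutations and commit them in a single atomic write.
//
//	batch := db.NewBatch()
//	_ = batch.Put([]byte("a"), []byte("1"))
//	_ = batch.Delete([]byte("b"))
//	if err := batch.Write(); err != nil {
//		panic(err)
//	}
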
// NewBatchWithSize creates a write-only database batch with a pre-allocated
// buffer. Pre-allocation is not supported by pebble, but pebble has a better
// memory allocation strategy which turns out to be a lot faster than leveldb.
// It's performant enough to construct a batch object without any pre-allocated
// space.
func (d *Database) NewBatchWithSize(_ int) ethdb.Batch {
	return &batch{
		b: d.db.NewBatch(),
	}
}

// snapshot wraps a pebble snapshot for implementing the Snapshot interface.
type snapshot struct {
	db *pebble.Snapshot
}

// NewSnapshot creates a database snapshot based on the current state.
// The created snapshot will not be affected by any mutations that happen
// on the database afterwards.
// Note, don't forget to release the snapshot once it's used up, otherwise
// the stale data will never be cleaned up by the underlying compactor.
func (d *Database) NewSnapshot() (ethdb.Snapshot, error) {
	snap := d.db.NewSnapshot()
	return &snapshot{db: snap}, nil
}

// Has retrieves if a key is present in the snapshot backed by a key-value
// data store.
func (snap *snapshot) Has(key []byte) (bool, error) {
	_, closer, err := snap.db.Get(key)
	if err != nil {
		if err != pebble.ErrNotFound {
			return false, err
		}
		return false, nil
	}
	closer.Close()
	return true, nil
}

// Get retrieves the given key if it's present in the snapshot backed by a
// key-value data store.
func (snap *snapshot) Get(key []byte) ([]byte, error) {
	dat, closer, err := snap.db.Get(key)
	if err != nil {
		return nil, err
	}
	ret := make([]byte, len(dat))
	copy(ret, dat)
	closer.Close()
	return ret, nil
}

// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (snap *snapshot) Release() {
	snap.db.Close()
}

// upperBound returns the first key that does not share the given prefix, to be
// used as an exclusive upper bound for prefix iteration. For example, the upper
// bound for prefix {0x01, 0xff} is {0x02}; a prefix consisting solely of 0xff
// bytes has no upper bound, in which case nil is returned.
func upperBound(prefix []byte) (limit []byte) {
	for i := len(prefix) - 1; i >= 0; i-- {
		c := prefix[i]
		if c == 0xff {
			continue
		}
		limit = make([]byte, i+1)
		copy(limit, prefix)
		limit[i] = c + 1
		break
	}
	return limit
}

// Stat returns a particular internal stat of the database. It is currently a
// no-op for pebble and always returns an empty string.
func (d *Database) Stat(property string) (string, error) {
	return "", nil
}

// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil, the
// entire data store will be compacted.
func (d *Database) Compact(start []byte, limit []byte) error {
	// There is no special flag to represent the end of the key range
	// in pebble (nil in leveldb). Use an ugly hack to construct a
	// large key to represent it.
	// Note, any prefixed database entry will be smaller than this
	// flag. As for trie nodes, the 32 bytes of 0xff are needed because
	// there might be a shared prefix consisting of a number of
	// 0xff-s, so 32 bytes ensure that only a hash collision could touch it.
	// https://github.com/cockroachdb/pebble/issues/2359#issuecomment-1443995833
	if limit == nil {
		limit = bytes.Repeat([]byte{0xff}, 32)
	}
	return d.db.Compact(start, limit, true) // Parallelization is preferred
}
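
// A full-compaction sketch (hypothetical caller): passing nil for both bounds
// compacts the entire store, which can take a long time on a big database.
//
//	if err := db.Compact(nil, nil); err != nil {
//		panic(err)
//	}
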
// Path returns the path to the database directory.
func (d *Database) Path() string {
	return d.fn
}

// meter periodically retrieves internal pebble counters and reports them to
// the metrics subsystem.
func (d *Database) meter(refresh time.Duration) {
	var errc chan error
	timer := time.NewTimer(refresh)
	defer timer.Stop()

	// Create storage for the delta computation of the write delay and
	// compaction stats.
	var (
		compTimes        [2]int64
		writeDelayTimes  [2]int64
		writeDelayCounts [2]int64
		compWrites       [2]int64
		compReads        [2]int64

		nWrites [2]int64
	)
	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil; i++ {
		var (
			compWrite int64
			compRead  int64
			nWrite    int64

			metrics            = d.db.Metrics()
			compTime           = atomic.LoadInt64(&d.compTime)
			writeDelayCount    = atomic.LoadInt64(&d.writeDelayCount)
			writeDelayTime     = atomic.LoadInt64(&d.writeDelayTime)
			nonLevel0CompCount = int64(atomic.LoadUint32(&d.nonLevel0Comp))
			level0CompCount    = int64(atomic.LoadUint32(&d.level0Comp))
		)
		writeDelayTimes[i%2] = writeDelayTime
		writeDelayCounts[i%2] = writeDelayCount
		compTimes[i%2] = compTime

		for _, levelMetrics := range metrics.Levels {
			nWrite += int64(levelMetrics.BytesCompacted)
			nWrite += int64(levelMetrics.BytesFlushed)
			compWrite += int64(levelMetrics.BytesCompacted)
			compRead += int64(levelMetrics.BytesRead)
		}
		nWrite += int64(metrics.WAL.BytesWritten)

		compWrites[i%2] = compWrite
		compReads[i%2] = compRead
		nWrites[i%2] = nWrite

		if d.writeDelayNMeter != nil {
			d.writeDelayNMeter.Mark(writeDelayCounts[i%2] - writeDelayCounts[(i-1)%2])
		}
		if d.writeDelayMeter != nil {
			d.writeDelayMeter.Mark(writeDelayTimes[i%2] - writeDelayTimes[(i-1)%2])
		}
		if d.compTimeMeter != nil {
			d.compTimeMeter.Mark(compTimes[i%2] - compTimes[(i-1)%2])
		}
		if d.compReadMeter != nil {
			d.compReadMeter.Mark(compReads[i%2] - compReads[(i-1)%2])
		}
		if d.compWriteMeter != nil {
			d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2])
		}
		if d.diskSizeGauge != nil {
			d.diskSizeGauge.Update(int64(metrics.DiskSpaceUsage()))
		}
		if d.diskReadMeter != nil {
			d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads
		}
		if d.diskWriteMeter != nil {
			d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2])
		}
		// See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054
		manuallyAllocated := metrics.BlockCache.Size + int64(metrics.MemTable.Size) + int64(metrics.MemTable.ZombieSize)
		d.manualMemAllocGauge.Update(manuallyAllocated)
		d.memCompGauge.Update(metrics.Flush.Count)
		d.nonlevel0CompGauge.Update(nonLevel0CompCount)
		d.level0CompGauge.Update(level0CompCount)
		d.seekCompGauge.Update(metrics.Compact.ReadCount)

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-d.quitChan:
			// Quit requested, stop hammering the database
		case <-timer.C:
			timer.Reset(refresh)
			// Timeout, gather a new set of stats
		}
	}
	errc <- nil
}
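
// The loop above keeps two slots per counter and alternates between them
// (i%2 versus (i-1)%2), so each tick marks the delta accumulated since the
// previous tick rather than the running total. Illustrative trace
// (hypothetical numbers):
//
//	tick 1 (i=1): compTimes[1] = 700, mark 700 - compTimes[0](=0)   = 700
//	tick 2 (i=2): compTimes[0] = 950, mark 950 - compTimes[1](=700) = 250
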
// batch is a write-only batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
	b    *pebble.Batch
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Set(key, value, nil)
	b.size += len(key) + len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key, nil)
	b.size += len(key)
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	return b.b.Commit(pebble.NoSync)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w ethdb.KeyValueWriter) error {
	reader := b.b.Reader()
	for {
		kind, k, v, ok := reader.Next()
		if !ok {
			break
		}
		// The (k,v) slices might be overwritten if the batch is reset/reused,
		// and the receiver should copy them if they are to be retained long-term.
		if kind == pebble.InternalKeyKindSet {
			w.Put(k, v)
		} else if kind == pebble.InternalKeyKindDelete {
			w.Delete(k)
		} else {
			return fmt.Errorf("unhandled operation, keytype: %v", kind)
		}
	}
	return nil
}

// pebbleIterator is a wrapper around the underlying iterator of the storage
// engine. Its purpose is to implement the missing APIs.
type pebbleIterator struct {
	iter  *pebble.Iterator
	moved bool
}

// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (d *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
	iter := d.db.NewIter(&pebble.IterOptions{
		LowerBound: append(prefix, start...),
		UpperBound: upperBound(prefix),
	})
	iter.First()
	return &pebbleIterator{iter: iter, moved: true}
}

// Next moves the iterator to the next key/value pair. It returns whether the
// iterator is exhausted.
func (iter *pebbleIterator) Next() bool {
	if iter.moved {
		iter.moved = false
		return iter.iter.Valid()
	}
	return iter.iter.Next()
}

// Error returns any accumulated error. Exhausting all the key/value pairs
// is not considered to be an error.
func (iter *pebbleIterator) Error() error {
	return iter.iter.Error()
}

// Key returns the key of the current key/value pair, or nil if done. The caller
// should not modify the contents of the returned slice, and its contents may
// change on the next call to Next.
func (iter *pebbleIterator) Key() []byte {
	return iter.iter.Key()
}

// Value returns the value of the current key/value pair, or nil if done. The
// caller should not modify the contents of the returned slice, and its contents
// may change on the next call to Next.
func (iter *pebbleIterator) Value() []byte {
	return iter.iter.Value()
}

// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (iter *pebbleIterator) Release() { iter.iter.Close() }
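
// A minimal iteration sketch (hypothetical caller): walk all keys sharing a
// prefix in binary-alphabetical order and surface any iterator error.
//
//	it := db.NewIterator([]byte("p-"), nil)
//	defer it.Release()
//	for it.Next() {
//		fmt.Printf("%s => %s\n", it.Key(), it.Value())
//	}
//	if err := it.Error(); err != nil {
//		panic(err)
//	}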