github.com/theQRL/go-zond@v0.2.1/zonddb/pebble/pebble.go

// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

//go:build (arm64 || amd64) && !openbsd

// Package pebble implements the key-value database layer based on pebble.
package pebble

import (
	"bytes"
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/bloom"
	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/metrics"
	"github.com/theQRL/go-zond/zonddb"
)

const (
	// minCache is the minimum amount of memory in megabytes to allocate to pebble
	// read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve pebble database
	// compaction, I/O and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)

// Database is a persistent key-value store based on the pebble storage engine.
// Apart from basic data storage functionality it also supports batch writes and
// iterating over the keyspace in binary-alphabetical order.
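//
// A minimal usage sketch (illustrative only; the path, cache/handle sizing and
// metrics namespace below are assumptions, not defaults of this package):
//
//	db, err := New("./chaindata", 512, 1024, "zond/db/chaindata/", false, false)
//	if err != nil {
//		// handle the error
//	}
//	defer db.Close()
//	if err := db.Put([]byte("key"), []byte("value")); err != nil {
//		// handle the error
//	}
//	val, _ := db.Get([]byte("key"))
//	_ = val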
type Database struct {
	fn string     // filename for reporting
	db *pebble.DB // Underlying pebble storage engine

	compTimeMeter       metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter       metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter      metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter    metrics.Meter // Meter for measuring the number of write delays due to database compaction
	writeDelayMeter     metrics.Meter // Meter for measuring the duration of write delays due to database compaction
	diskSizeGauge       metrics.Gauge // Gauge for tracking the size of all the levels in the database
	diskReadMeter       metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter      metrics.Meter // Meter for measuring the effective amount of data written
	memCompGauge        metrics.Gauge // Gauge for tracking the number of memory compactions
	level0CompGauge     metrics.Gauge // Gauge for tracking the number of table compactions in level0
	nonlevel0CompGauge  metrics.Gauge // Gauge for tracking the number of table compactions in non-level0 levels
	seekCompGauge       metrics.Gauge // Gauge for tracking the number of table compactions caused by read optimization
	manualMemAllocGauge metrics.Gauge // Gauge for tracking the amount of non-managed memory currently allocated

	levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in each level

	quitLock sync.RWMutex    // Mutex protecting the quit channel and the closed flag
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
	closed   bool            // keep track of whether the database has been closed

	log log.Logger // Contextual logger tracking the database path

	activeComp          int           // Current number of active compactions
	compStartTime       time.Time     // The start time of the earliest currently-active compaction
	compTime            atomic.Int64  // Total time spent in compaction in ns
	level0Comp          atomic.Uint32 // Total number of level-zero compactions
	nonLevel0Comp       atomic.Uint32 // Total number of non level-zero compactions
	writeDelayStartTime time.Time     // The start time of the latest write stall
	writeDelayCount     atomic.Int64  // Total number of write stalls
	writeDelayTime      atomic.Int64  // Total time spent in write stalls

	writeOptions *pebble.WriteOptions
}

func (d *Database) onCompactionBegin(info pebble.CompactionInfo) {
	if d.activeComp == 0 {
		d.compStartTime = time.Now()
	}
	l0 := info.Input[0]
	if l0.Level == 0 {
		d.level0Comp.Add(1)
	} else {
		d.nonLevel0Comp.Add(1)
	}
	d.activeComp++
}

func (d *Database) onCompactionEnd(info pebble.CompactionInfo) {
	if d.activeComp == 1 {
		d.compTime.Add(int64(time.Since(d.compStartTime)))
	} else if d.activeComp == 0 {
		panic("should not happen")
	}
	d.activeComp--
}

func (d *Database) onWriteStallBegin(b pebble.WriteStallBeginInfo) {
	d.writeDelayStartTime = time.Now()
}

func (d *Database) onWriteStallEnd() {
	d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime)))
}

// panicLogger silences Pebble's internal logging: Infof and Errorf are no-ops,
// while Fatalf escalates the error into a panic.
//
// TODO(karalabe): Remove when Pebble sets this as the default.
type panicLogger struct{}

func (l panicLogger) Infof(format string, args ...interface{}) {
}

func (l panicLogger) Errorf(format string, args ...interface{}) {
}

func (l panicLogger) Fatalf(format string, args ...interface{}) {
	panic(fmt.Errorf("fatal: "+format, args...))
}

// New returns a wrapped pebble DB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < minCache {
		cache = minCache
	}
	if handles < minHandles {
		handles = minHandles
	}
	logger := log.New("database", file)
	logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)

	// The max memtable size is limited by the uint32 offsets stored in
	// internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry:
	//
	//   - MaxUint32 on 64-bit platforms;
	//   - MaxInt on 32-bit platforms.
	//
	// It is used where slice sizes are limited to MaxUint32 on 64-bit platforms
	// (the length limit for slices is naturally MaxInt on 32-bit platforms).
	//
	// Taken from https://github.com/cockroachdb/pebble/blob/master/internal/constants/constants.go
	maxMemTableSize := (1<<31)<<(^uint(0)>>63) - 1

	// Two memory tables are configured, which is identical to leveldb: one
	// frozen memory table and one live one.
	memTableLimit := 2
	memTableSize := cache * 1024 * 1024 / 2 / memTableLimit

	// The memory table size is currently capped at maxMemTableSize-1 due to a
	// known bug in pebble where maxMemTableSize is not recognized as a
	// valid size.
	//
	// TODO use maxMemTableSize as the maximum table size once the issue
	// in pebble is fixed.
	if memTableSize >= maxMemTableSize {
		memTableSize = maxMemTableSize - 1
	}
	db := &Database{
		fn:           file,
		log:          logger,
		quitChan:     make(chan chan error),
		writeOptions: &pebble.WriteOptions{Sync: !ephemeral},
	}
	opt := &pebble.Options{
		// Pebble has a single combined cache area and the write
		// buffers are taken from this too. Assign all available
		// memory allowance for cache.
		Cache:        pebble.NewCache(int64(cache * 1024 * 1024)),
		MaxOpenFiles: handles,

		// The size of each memory table (which also acts as the write buffer).
		// Note, there may be more than two memory tables in the system.
		MemTableSize: uint64(memTableSize),

		// MemTableStopWritesThreshold places a hard limit on the number of
		// existent MemTables (including the frozen one). Note, this must be
		// the number of tables, not the total size of all memtables, according
		// to https://github.com/cockroachdb/pebble/blob/master/options.go#L738-L742
		// and to https://github.com/cockroachdb/pebble/blob/master/db.go#L1892-L1903.
		MemTableStopWritesThreshold: memTableLimit,

		// The default compaction concurrency is 1 thread; here we use all
		// available CPUs for faster compaction.
		MaxConcurrentCompactions: func() int { return runtime.NumCPU() },

		// Per-level options. Options for at least one level must be specified. The
		// options for the last level are used for all subsequent levels.
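		// (Editorial note: bloom.FilterPolicy(10) configures roughly 10 bits
		// of bloom filter per key, i.e. about a 1% false-positive rate, and
		// the 2 MiB target file size mirrors leveldb's default table size.)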
		Levels: []pebble.LevelOptions{
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
		},
		ReadOnly: readonly,
		EventListener: &pebble.EventListener{
			CompactionBegin: db.onCompactionBegin,
			CompactionEnd:   db.onCompactionEnd,
			WriteStallBegin: db.onWriteStallBegin,
			WriteStallEnd:   db.onWriteStallEnd,
		},
		Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble
	}
	// Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130
	// for more details.
	opt.Experimental.ReadSamplingMultiplier = -1

	// Open the db and recover any potential corruptions
	innerDB, err := pebble.Open(file, opt)
	if err != nil {
		return nil, err
	}
	db.db = innerDB

	db.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	db.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	db.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	db.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
	db.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	db.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	db.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	db.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
	db.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
	db.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
	db.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
	db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)
	db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)

	// Start up the metrics gathering and return
	go db.meter(metricsGatheringInterval, namespace)
	return db, nil
}

// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (d *Database) Close() error {
	d.quitLock.Lock()
	defer d.quitLock.Unlock()
	// Allow double closing, simplifies things
	if d.closed {
		return nil
	}
	d.closed = true
	if d.quitChan != nil {
		errc := make(chan error)
		d.quitChan <- errc
		if err := <-errc; err != nil {
			d.log.Error("Metrics collection failed", "err", err)
		}
		d.quitChan = nil
	}
	return d.db.Close()
}

// Has retrieves if a key is present in the key-value store.
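// A missing key is reported as (false, nil) rather than surfacing
// pebble.ErrNotFound to the caller.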
func (d *Database) Has(key []byte) (bool, error) {
	d.quitLock.RLock()
	defer d.quitLock.RUnlock()
	if d.closed {
		return false, pebble.ErrClosed
	}
	_, closer, err := d.db.Get(key)
	if err == pebble.ErrNotFound {
		return false, nil
	} else if err != nil {
		return false, err
	}
	closer.Close()
	return true, nil
}

// Get retrieves the given key if it's present in the key-value store.
func (d *Database) Get(key []byte) ([]byte, error) {
	d.quitLock.RLock()
	defer d.quitLock.RUnlock()
	if d.closed {
		return nil, pebble.ErrClosed
	}
	dat, closer, err := d.db.Get(key)
	if err != nil {
		return nil, err
	}
	ret := make([]byte, len(dat))
	copy(ret, dat)
	closer.Close()
	return ret, nil
}

// Put inserts the given value into the key-value store.
func (d *Database) Put(key []byte, value []byte) error {
	d.quitLock.RLock()
	defer d.quitLock.RUnlock()
	if d.closed {
		return pebble.ErrClosed
	}
	return d.db.Set(key, value, d.writeOptions)
}

// Delete removes the key from the key-value store.
func (d *Database) Delete(key []byte) error {
	d.quitLock.RLock()
	defer d.quitLock.RUnlock()
	if d.closed {
		return pebble.ErrClosed
	}
	return d.db.Delete(key, nil)
}

// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (d *Database) NewBatch() zonddb.Batch {
	return &batch{
		b:  d.db.NewBatch(),
		db: d,
	}
}

// NewBatchWithSize creates a write-only database batch with a pre-allocated buffer.
func (d *Database) NewBatchWithSize(size int) zonddb.Batch {
	return &batch{
		b:  d.db.NewBatchWithSize(size),
		db: d,
	}
}

// snapshot wraps a pebble snapshot for implementing the Snapshot interface.
type snapshot struct {
	db *pebble.Snapshot
}

// NewSnapshot creates a database snapshot based on the current state.
// The created snapshot will not be affected by any mutations that happen
// on the database afterwards.
// Note, don't forget to release the snapshot once it's no longer needed,
// otherwise the stale data will never be cleaned up by the underlying
// compactor.
func (d *Database) NewSnapshot() (zonddb.Snapshot, error) {
	snap := d.db.NewSnapshot()
	return &snapshot{db: snap}, nil
}

// Has retrieves if a key is present in the snapshot backed by a key-value
// data store.
func (snap *snapshot) Has(key []byte) (bool, error) {
	_, closer, err := snap.db.Get(key)
	if err != nil {
		if err != pebble.ErrNotFound {
			return false, err
		}
		return false, nil
	}
	closer.Close()
	return true, nil
}

// Get retrieves the given key if it's present in the snapshot backed by a
// key-value data store.
func (snap *snapshot) Get(key []byte) ([]byte, error) {
	dat, closer, err := snap.db.Get(key)
	if err != nil {
		return nil, err
	}
	ret := make([]byte, len(dat))
	copy(ret, dat)
	closer.Close()
	return ret, nil
}

// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
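//
// A typical snapshot lifecycle (illustrative sketch, assuming a *Database
// named db):
//
//	snap, err := db.NewSnapshot()
//	if err != nil {
//		// handle the error
//	}
//	defer snap.Release()
//	val, err := snap.Get([]byte("key")) // reads the state as of snapshot time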
func (snap *snapshot) Release() {
	snap.db.Close()
}

// upperBound returns the upper bound for the given prefix
func upperBound(prefix []byte) (limit []byte) {
	for i := len(prefix) - 1; i >= 0; i-- {
		c := prefix[i]
		if c == 0xff {
			continue
		}
		limit = make([]byte, i+1)
		copy(limit, prefix)
		limit[i] = c + 1
		break
	}
	return limit
}

// Stat returns the internal metrics of Pebble in a text format. It's a developer
// method to read everything there is to read, independent of the Pebble version.
//
// The property is unused in Pebble as there's only one thing to retrieve.
func (d *Database) Stat(property string) (string, error) {
	return d.db.Metrics().String(), nil
}

// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil then the
// entire data store will be compacted.
func (d *Database) Compact(start []byte, limit []byte) error {
	// There is no special flag to represent the end of key range
	// in pebble (nil in leveldb). Use an ugly hack to construct a
	// large key to represent it.
	// Note any prefixed database entry will be smaller than this
	// flag. As for trie nodes, we need 32 bytes of 0xff because
	// there might be a shared prefix starting with a number of
	// 0xff-s, so 32 ensures that only a hash collision could touch it.
	// https://github.com/cockroachdb/pebble/issues/2359#issuecomment-1443995833
	if limit == nil {
		limit = bytes.Repeat([]byte{0xff}, 32)
	}
	return d.db.Compact(start, limit, true) // Parallelization is preferred
}

// Path returns the path to the database directory.
func (d *Database) Path() string {
	return d.fn
}

// meter periodically retrieves internal pebble counters and reports them to
// the metrics subsystem.
func (d *Database) meter(refresh time.Duration, namespace string) {
	var errc chan error
	timer := time.NewTimer(refresh)
	defer timer.Stop()

	// Create storage and warning log tracer for write delay.
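	// Each [2]int64 array below keeps two samples of a cumulative counter:
	// slot i%2 receives the fresh reading and slot (i-1)%2 holds the previous
	// one, so their difference is the delta accrued during the last interval.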
	var (
		compTimes        [2]int64
		writeDelayTimes  [2]int64
		writeDelayCounts [2]int64
		compWrites       [2]int64
		compReads        [2]int64

		nWrites [2]int64
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil; i++ {
		var (
			compWrite int64
			compRead  int64
			nWrite    int64

			stats              = d.db.Metrics()
			compTime           = d.compTime.Load()
			writeDelayCount    = d.writeDelayCount.Load()
			writeDelayTime     = d.writeDelayTime.Load()
			nonLevel0CompCount = int64(d.nonLevel0Comp.Load())
			level0CompCount    = int64(d.level0Comp.Load())
		)
		writeDelayTimes[i%2] = writeDelayTime
		writeDelayCounts[i%2] = writeDelayCount
		compTimes[i%2] = compTime

		for _, levelMetrics := range stats.Levels {
			nWrite += int64(levelMetrics.BytesCompacted)
			nWrite += int64(levelMetrics.BytesFlushed)
			compWrite += int64(levelMetrics.BytesCompacted)
			compRead += int64(levelMetrics.BytesRead)
		}

		nWrite += int64(stats.WAL.BytesWritten)

		compWrites[i%2] = compWrite
		compReads[i%2] = compRead
		nWrites[i%2] = nWrite

		if d.writeDelayNMeter != nil {
			d.writeDelayNMeter.Mark(writeDelayCounts[i%2] - writeDelayCounts[(i-1)%2])
		}
		if d.writeDelayMeter != nil {
			d.writeDelayMeter.Mark(writeDelayTimes[i%2] - writeDelayTimes[(i-1)%2])
		}
		if d.compTimeMeter != nil {
			d.compTimeMeter.Mark(compTimes[i%2] - compTimes[(i-1)%2])
		}
		if d.compReadMeter != nil {
			d.compReadMeter.Mark(compReads[i%2] - compReads[(i-1)%2])
		}
		if d.compWriteMeter != nil {
			d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2])
		}
		if d.diskSizeGauge != nil {
			d.diskSizeGauge.Update(int64(stats.DiskSpaceUsage()))
		}
		if d.diskReadMeter != nil {
			d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads
		}
		if d.diskWriteMeter != nil {
			d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2])
		}
		// See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054
		manuallyAllocated := stats.BlockCache.Size + int64(stats.MemTable.Size) + int64(stats.MemTable.ZombieSize)
		d.manualMemAllocGauge.Update(manuallyAllocated)
		d.memCompGauge.Update(stats.Flush.Count)
		d.nonlevel0CompGauge.Update(nonLevel0CompCount)
		d.level0CompGauge.Update(level0CompCount)
		d.seekCompGauge.Update(stats.Compact.ReadCount)

		for i, level := range stats.Levels {
			// Append metrics for additional layers
			if i >= len(d.levelsGauge) {
				d.levelsGauge = append(d.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil))
			}
			d.levelsGauge[i].Update(level.NumFiles)
		}

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-d.quitChan:
			// Quit requested, stop hammering the database
		case <-timer.C:
			timer.Reset(refresh)
			// Timeout, gather a new set of stats
		}
	}
	errc <- nil
}

// batch is a write-only batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
	b    *pebble.Batch
	db   *Database
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Set(key, value, nil)
	b.size += len(key) + len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
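//
// A typical batch lifecycle (illustrative sketch, assuming a *Database named
// db):
//
//	b := db.NewBatch()
//	_ = b.Put([]byte("k1"), []byte("v1"))
//	_ = b.Delete([]byte("k2"))
//	if err := b.Write(); err != nil {
//		// handle the error
//	}
//	b.Reset() // the batch may then be reused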
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key, nil)
	b.size += len(key)
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	b.db.quitLock.RLock()
	defer b.db.quitLock.RUnlock()
	if b.db.closed {
		return pebble.ErrClosed
	}
	return b.b.Commit(b.db.writeOptions)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w zonddb.KeyValueWriter) error {
	reader := b.b.Reader()
	for {
		kind, k, v, ok, err := reader.Next()
		if !ok || err != nil {
			break
		}
		// The (k,v) slices might be overwritten if the batch is reset/reused;
		// the receiver should copy them if they are to be retained long-term.
		if kind == pebble.InternalKeyKindSet {
			w.Put(k, v)
		} else if kind == pebble.InternalKeyKindDelete {
			w.Delete(k)
		} else {
			return fmt.Errorf("unhandled operation, keytype: %v", kind)
		}
	}
	return nil
}

// pebbleIterator is a wrapper of the underlying iterator in the storage engine.
// The purpose of this structure is to implement the missing APIs.
//
// The pebble iterator is not thread-safe.
type pebbleIterator struct {
	iter     *pebble.Iterator
	moved    bool
	released bool
}

// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (d *Database) NewIterator(prefix []byte, start []byte) zonddb.Iterator {
	iter, _ := d.db.NewIter(&pebble.IterOptions{
		LowerBound: append(prefix, start...),
		UpperBound: upperBound(prefix),
	})
	iter.First()
	return &pebbleIterator{iter: iter, moved: true, released: false}
}

// Next moves the iterator to the next key/value pair. It returns false if the
// iterator is exhausted.
func (iter *pebbleIterator) Next() bool {
	if iter.moved {
		iter.moved = false
		return iter.iter.Valid()
	}
	return iter.iter.Next()
}

// Error returns any accumulated error. Exhausting all the key/value pairs
// is not considered to be an error.
func (iter *pebbleIterator) Error() error {
	return iter.iter.Error()
}

// Key returns the key of the current key/value pair, or nil if done. The caller
// should not modify the contents of the returned slice, and its contents may
// change on the next call to Next.
func (iter *pebbleIterator) Key() []byte {
	return iter.iter.Key()
}

// Value returns the value of the current key/value pair, or nil if done. The
// caller should not modify the contents of the returned slice, and its contents
// may change on the next call to Next.
func (iter *pebbleIterator) Value() []byte {
	return iter.iter.Value()
}

// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (iter *pebbleIterator) Release() {
	if !iter.released {
		iter.iter.Close()
		iter.released = true
	}
}
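// A minimal prefix-iteration sketch (illustrative only, assuming a *Database
// named db and a hypothetical consumer function process):
//
//	it := db.NewIterator([]byte("prefix-"), nil)
//	defer it.Release()
//	for it.Next() {
//		process(it.Key(), it.Value()) // contents valid only until the next call to Next
//	}
//	if err := it.Error(); err != nil {
//		// handle the error
//	}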