github.com/theQRL/go-zond@v0.1.1/zonddb/pebble/pebble.go

// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

//go:build (arm64 || amd64) && !openbsd

// Package pebble implements the key-value database layer based on pebble.
package pebble

import (
	"bytes"
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/bloom"
	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/metrics"
	"github.com/theQRL/go-zond/zonddb"
)

const (
	// minCache is the minimum amount of memory in megabytes to allocate to pebble
	// read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve pebble database
	// compaction, I/O and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)

// Database is a persistent key-value store based on the pebble storage engine.
// Apart from basic data storage functionality it also supports batch writes and
// iterating over the keyspace in binary-alphabetical order.
type Database struct {
	fn string     // filename for reporting
	db *pebble.DB // Underlying pebble storage engine

	compTimeMeter       metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter       metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter      metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter    metrics.Meter // Meter for measuring the number of write delays due to database compaction
	writeDelayMeter     metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskSizeGauge       metrics.Gauge // Gauge for tracking the size of all the levels in the database
	diskReadMeter       metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter      metrics.Meter // Meter for measuring the effective amount of data written
	memCompGauge        metrics.Gauge // Gauge for tracking the number of memory compactions
	level0CompGauge     metrics.Gauge // Gauge for tracking the number of table compactions in level0
	nonlevel0CompGauge  metrics.Gauge // Gauge for tracking the number of table compactions in non-level0 levels
	seekCompGauge       metrics.Gauge // Gauge for tracking the number of table compactions caused by read optimization
	manualMemAllocGauge metrics.Gauge // Gauge for tracking the amount of non-managed memory currently allocated

	quitLock sync.RWMutex    // Mutex protecting the quit channel and the closed flag
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
	closed   bool            // keep track of whether we're Closed

	log log.Logger // Contextual logger tracking the database path

	activeComp          int           // Current number of active compactions
	compStartTime       time.Time     // The start time of the earliest currently-active compaction
	compTime            atomic.Int64  // Total time spent in compaction in ns
	level0Comp          atomic.Uint32 // Total number of level-zero compactions
	nonLevel0Comp       atomic.Uint32 // Total number of non level-zero compactions
	writeDelayStartTime time.Time     // The start time of the latest write stall
	writeDelayCount     atomic.Int64  // Total number of write stall counts
	writeDelayTime      atomic.Int64  // Total time spent in write stalls

	writeOptions *pebble.WriteOptions
}
func (d *Database) onCompactionBegin(info pebble.CompactionInfo) {
	if d.activeComp == 0 {
		d.compStartTime = time.Now()
	}
	l0 := info.Input[0]
	if l0.Level == 0 {
		d.level0Comp.Add(1)
	} else {
		d.nonLevel0Comp.Add(1)
	}
	d.activeComp++
}

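// onCompactionEnd accumulates the wall-clock time during which at least one
// compaction was active: compStartTime is stamped when the active count goes
// from zero to one (see onCompactionBegin above), and the elapsed time is only
// added once the last remaining compaction finishes.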
func (d *Database) onCompactionEnd(info pebble.CompactionInfo) {
	if d.activeComp == 1 {
		d.compTime.Add(int64(time.Since(d.compStartTime)))
	} else if d.activeComp == 0 {
		panic("should not happen")
	}
	d.activeComp--
}

func (d *Database) onWriteStallBegin(b pebble.WriteStallBeginInfo) {
	d.writeDelayStartTime = time.Now()
}

func (d *Database) onWriteStallEnd() {
	d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime)))
}

// New returns a wrapped pebble DB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < minCache {
		cache = minCache
	}
	if handles < minHandles {
		handles = minHandles
	}
	logger := log.New("database", file)
	logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)

	// The max memtable size is limited by the uint32 offsets stored in
	// internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry.
	// Taken from https://github.com/cockroachdb/pebble/blob/master/open.go#L38
	maxMemTableSize := 4<<30 - 1 // Capped by 4 GB

	// Two memory tables are configured, which is identical to leveldb: one
	// frozen memory table (being flushed) and one live one (receiving writes).
	memTableLimit := 2
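	// Half of the cache allowance is handed to the memtables, split evenly
	// across the two tables; e.g. a 512 MB cache yields 512/2/2 = 128 MB per
	// memtable.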
	memTableSize := cache * 1024 * 1024 / 2 / memTableLimit
	if memTableSize > maxMemTableSize {
		memTableSize = maxMemTableSize
	}
	db := &Database{
		fn:           file,
		log:          logger,
		quitChan:     make(chan chan error),
		writeOptions: &pebble.WriteOptions{Sync: !ephemeral},
	}
	opt := &pebble.Options{
		// Pebble has a single combined cache area and the write
		// buffers are taken from this too. Assign all available
		// memory allowance for cache.
		Cache:        pebble.NewCache(int64(cache * 1024 * 1024)),
		MaxOpenFiles: handles,

		// The size of the memory table (as well as the write buffer).
		// Note, there may be more than two memory tables in the system.
		MemTableSize: memTableSize,

		// MemTableStopWritesThreshold places a hard limit on the number
		// of existent MemTables (including the frozen one).
		// Note, this must be the number of tables, not the total size of all
		// memtables, according to
		// https://github.com/cockroachdb/pebble/blob/master/options.go#L738-L742
		// and https://github.com/cockroachdb/pebble/blob/master/db.go#L1892-L1903.
		MemTableStopWritesThreshold: memTableLimit,

		// The default compaction concurrency is 1 thread; use all
		// available CPUs instead for faster compaction.
		MaxConcurrentCompactions: func() int { return runtime.NumCPU() },

		// Per-level options. Options for at least one level must be specified. The
		// options for the last level are used for all subsequent levels.
		Levels: []pebble.LevelOptions{
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
			{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
		},
		ReadOnly: readonly,
		EventListener: &pebble.EventListener{
			CompactionBegin: db.onCompactionBegin,
			CompactionEnd:   db.onCompactionEnd,
			WriteStallBegin: db.onWriteStallBegin,
			WriteStallEnd:   db.onWriteStallEnd,
		},
	}
	// Disable seek compaction explicitly. Check https://github.com/theQRL/go-zond/pull/20130
	// for more details.
	opt.Experimental.ReadSamplingMultiplier = -1

	// Open the db and recover any potential corruptions
	innerDB, err := pebble.Open(file, opt)
	if err != nil {
		return nil, err
	}
	db.db = innerDB

	db.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	db.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	db.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	db.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
	db.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	db.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	db.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	db.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
	db.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
	db.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
	db.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
	db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)
	db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)

	// Start up the metrics gathering and return
	go db.meter(metricsGatheringInterval)
	return db, nil
}

// Close stops the metrics collection, flushes any pending data to disk and closes
// all I/O accesses to the underlying key-value store.
func (d *Database) Close() error {
	d.quitLock.Lock()
	defer d.quitLock.Unlock()
	// Allow double closing, simplifies things
	if d.closed {
		return nil
	}
	d.closed = true
	if d.quitChan != nil {
		errc := make(chan error)
		d.quitChan <- errc
		if err := <-errc; err != nil {
			d.log.Error("Metrics collection failed", "err", err)
		}
		d.quitChan = nil
	}
	return d.db.Close()
}

// Has retrieves if a key is present in the key-value store.
func (d *Database) Has(key []byte) (bool, error) {
	d.quitLock.RLock()
	defer d.quitLock.RUnlock()
	if d.closed {
		return false, pebble.ErrClosed
	}
	_, closer, err := d.db.Get(key)
	if err == pebble.ErrNotFound {
		return false, nil
	} else if err != nil {
		return false, err
	}
	closer.Close()
	return true, nil
}

// Get retrieves the given key if it's present in the key-value store.
func (d *Database) Get(key []byte) ([]byte, error) {
	d.quitLock.RLock()
	defer d.quitLock.RUnlock()
	if d.closed {
		return nil, pebble.ErrClosed
	}
	dat, closer, err := d.db.Get(key)
	if err != nil {
		return nil, err
	}
	ret := make([]byte, len(dat))
	copy(ret, dat)
	closer.Close()
	return ret, nil
}

// Put inserts the given value into the key-value store.
func (d *Database) Put(key []byte, value []byte) error {
	d.quitLock.RLock()
	defer d.quitLock.RUnlock()
	if d.closed {
		return pebble.ErrClosed
	}
	return d.db.Set(key, value, d.writeOptions)
}

// Delete removes the key from the key-value store.
func (d *Database) Delete(key []byte) error {
	d.quitLock.RLock()
	defer d.quitLock.RUnlock()
	if d.closed {
		return pebble.ErrClosed
	}
	return d.db.Delete(key, nil)
}

// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (d *Database) NewBatch() zonddb.Batch {
	return &batch{
		b:  d.db.NewBatch(),
		db: d,
	}
}

// NewBatchWithSize creates a write-only database batch with a pre-allocated
// buffer. Pre-allocation is not supported by pebble, but pebble's memory
// allocation strategy turns out to be a lot faster than leveldb's, so it is
// performant enough to construct the batch object without any pre-allocated
// space.
func (d *Database) NewBatchWithSize(_ int) zonddb.Batch {
	return &batch{
		b:  d.db.NewBatch(),
		db: d,
	}
}

// snapshot wraps a pebble snapshot for implementing the Snapshot interface.
type snapshot struct {
	db *pebble.Snapshot
}

// NewSnapshot creates a database snapshot based on the current state.
// The created snapshot will not be affected by any mutations that happen
// on the database afterwards.
// Note, don't forget to release the snapshot once it's no longer needed,
// otherwise the stale data will never be cleaned up by the underlying
// compactor.
func (d *Database) NewSnapshot() (zonddb.Snapshot, error) {
	snap := d.db.NewSnapshot()
	return &snapshot{db: snap}, nil
}

// Has retrieves if a key is present in the snapshot backed by a key-value
// data store.
func (snap *snapshot) Has(key []byte) (bool, error) {
	_, closer, err := snap.db.Get(key)
	if err != nil {
		if err == pebble.ErrNotFound {
			return false, nil
		}
		return false, err
	}
	closer.Close()
	return true, nil
}

// Get retrieves the given key if it's present in the snapshot backed by a
// key-value data store.
func (snap *snapshot) Get(key []byte) ([]byte, error) {
	dat, closer, err := snap.db.Get(key)
	if err != nil {
		return nil, err
	}
	ret := make([]byte, len(dat))
	copy(ret, dat)
	closer.Close()
	return ret, nil
}

// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (snap *snapshot) Release() {
	snap.db.Close()
}

// upperBound returns the upper bound for the given prefix: the smallest key
// that is strictly greater than all keys carrying the prefix, or nil if the
// prefix is empty or consists entirely of 0xff bytes (no finite bound exists).
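//
// For example (per the implementation below):
//
//	upperBound([]byte{0x01, 0xff}) == []byte{0x02}
//	upperBound([]byte{0xff, 0xff}) == nil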
func upperBound(prefix []byte) (limit []byte) {
	for i := len(prefix) - 1; i >= 0; i-- {
		c := prefix[i]
		if c == 0xff {
			continue
		}
		limit = make([]byte, i+1)
		copy(limit, prefix)
		limit[i] = c + 1
		break
	}
	return limit
}

// Stat returns a particular internal stat of the database. It is currently a
// no-op for the pebble backend.
func (d *Database) Stat(property string) (string, error) {
	return "", nil
}

// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil, the
// entire data store will be compacted.
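//
// For example, Compact(nil, nil) compacts the whole key space; the nil limit
// is replaced internally by a 32-byte run of 0xff bytes, as pebble has no
// sentinel for "after all keys".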
func (d *Database) Compact(start []byte, limit []byte) error {
	// There is no special flag to represent the end of key range
	// in pebble (nil in leveldb). Use an ugly hack to construct a
	// large key to represent it.
	// Note any prefixed database entry will be smaller than this
	// flag, as for trie nodes we need the 32 byte 0xff because
	// there might be a shared prefix starting with a number of
	// 0xff-s, so 32 ensures that only a hash collision could touch it.
	// https://github.com/cockroachdb/pebble/issues/2359#issuecomment-1443995833
	if limit == nil {
		limit = bytes.Repeat([]byte{0xff}, 32)
	}
	return d.db.Compact(start, limit, true) // Parallelization is preferred
}

// Path returns the path to the database directory.
func (d *Database) Path() string {
	return d.fn
}

// meter periodically retrieves internal pebble counters and reports them to
// the metrics subsystem.
func (d *Database) meter(refresh time.Duration) {
	var errc chan error
	timer := time.NewTimer(refresh)
	defer timer.Stop()

	// Create storage for the previous and current stat samples.
	var (
		compTimes        [2]int64
		writeDelayTimes  [2]int64
		writeDelayCounts [2]int64
		compWrites       [2]int64
		compReads        [2]int64

		nWrites [2]int64
	)
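	// The two-element arrays act as ring buffers: on iteration i, slot i%2
	// holds the freshly sampled counters and slot (i-1)%2 the previous ones,
	// so each meter below is marked with the delta of the last interval.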

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil; i++ {
		var (
			compWrite int64
			compRead  int64
			nWrite    int64

			metrics            = d.db.Metrics()
			compTime           = d.compTime.Load()
			writeDelayCount    = d.writeDelayCount.Load()
			writeDelayTime     = d.writeDelayTime.Load()
			nonLevel0CompCount = int64(d.nonLevel0Comp.Load())
			level0CompCount    = int64(d.level0Comp.Load())
		)
		writeDelayTimes[i%2] = writeDelayTime
		writeDelayCounts[i%2] = writeDelayCount
		compTimes[i%2] = compTime

		for _, levelMetrics := range metrics.Levels {
			nWrite += int64(levelMetrics.BytesCompacted)
			nWrite += int64(levelMetrics.BytesFlushed)
			compWrite += int64(levelMetrics.BytesCompacted)
			compRead += int64(levelMetrics.BytesRead)
		}

		nWrite += int64(metrics.WAL.BytesWritten)

		compWrites[i%2] = compWrite
		compReads[i%2] = compRead
		nWrites[i%2] = nWrite

		if d.writeDelayNMeter != nil {
			d.writeDelayNMeter.Mark(writeDelayCounts[i%2] - writeDelayCounts[(i-1)%2])
		}
		if d.writeDelayMeter != nil {
			d.writeDelayMeter.Mark(writeDelayTimes[i%2] - writeDelayTimes[(i-1)%2])
		}
		if d.compTimeMeter != nil {
			d.compTimeMeter.Mark(compTimes[i%2] - compTimes[(i-1)%2])
		}
		if d.compReadMeter != nil {
			d.compReadMeter.Mark(compReads[i%2] - compReads[(i-1)%2])
		}
		if d.compWriteMeter != nil {
			d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2])
		}
		if d.diskSizeGauge != nil {
			d.diskSizeGauge.Update(int64(metrics.DiskSpaceUsage()))
		}
		if d.diskReadMeter != nil {
			d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads
		}
		if d.diskWriteMeter != nil {
			d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2])
		}
		// See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054
		manuallyAllocated := metrics.BlockCache.Size + int64(metrics.MemTable.Size) + int64(metrics.MemTable.ZombieSize)
		d.manualMemAllocGauge.Update(manuallyAllocated)
		d.memCompGauge.Update(metrics.Flush.Count)
		d.nonlevel0CompGauge.Update(nonLevel0CompCount)
		d.level0CompGauge.Update(level0CompCount)
		d.seekCompGauge.Update(metrics.Compact.ReadCount)

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-d.quitChan:
			// Quit requesting, stop hammering the database
		case <-timer.C:
			timer.Reset(refresh)
			// Timeout, gather a new set of stats
		}
	}
	errc <- nil
}

// batch is a write-only batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
	b    *pebble.Batch
	db   *Database
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Set(key, value, nil)
	b.size += len(key) + len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key, nil)
	b.size += len(key)
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	b.db.quitLock.RLock()
	defer b.db.quitLock.RUnlock()
	if b.db.closed {
		return pebble.ErrClosed
	}
	return b.b.Commit(b.db.writeOptions)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w zonddb.KeyValueWriter) error {
	reader := b.b.Reader()
	for {
		kind, k, v, ok := reader.Next()
		if !ok {
			break
		}
		// The (k,v) slices might be overwritten if the batch is reset/reused,
		// and the receiver should copy them if they are to be retained long-term.
		if kind == pebble.InternalKeyKindSet {
			w.Put(k, v)
		} else if kind == pebble.InternalKeyKindDelete {
			w.Delete(k)
		} else {
			return fmt.Errorf("unhandled operation, keytype: %v", kind)
		}
	}
	return nil
}

// pebbleIterator is a wrapper of the underlying iterator in the storage engine.
// The purpose of this structure is to implement the missing APIs
// of the zonddb.Iterator interface.
type pebbleIterator struct {
	iter  *pebble.Iterator
	moved bool // whether the iterator was pre-positioned, so the first Next must not advance
}

// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (d *Database) NewIterator(prefix []byte, start []byte) zonddb.Iterator {
	iter := d.db.NewIter(&pebble.IterOptions{
		LowerBound: append(prefix, start...),
		UpperBound: upperBound(prefix),
	})
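	// Eagerly position the iterator on its first key; the moved flag lets the
	// first call to Next report validity without advancing past that key.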
	iter.First()
	return &pebbleIterator{iter: iter, moved: true}
}

// Next moves the iterator to the next key/value pair. It returns whether the
// iterator is exhausted.
func (iter *pebbleIterator) Next() bool {
	if iter.moved {
		iter.moved = false
		return iter.iter.Valid()
	}
	return iter.iter.Next()
}

// Error returns any accumulated error. Exhausting all the key/value pairs
// is not considered to be an error.
func (iter *pebbleIterator) Error() error {
	return iter.iter.Error()
}

// Key returns the key of the current key/value pair, or nil if done. The caller
// should not modify the contents of the returned slice, and its contents may
// change on the next call to Next.
func (iter *pebbleIterator) Key() []byte {
	return iter.iter.Key()
}

// Value returns the value of the current key/value pair, or nil if done. The
// caller should not modify the contents of the returned slice, and its contents
// may change on the next call to Next.
func (iter *pebbleIterator) Value() []byte {
	return iter.iter.Value()
}

// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (iter *pebbleIterator) Release() { iter.iter.Close() }
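
// A minimal usage sketch of this package (hypothetical caller code, shown for
// illustration only; the path, cache/handle sizes and namespace are made-up
// values):
//
//	db, err := pebble.New("/tmp/chaindata", 512, 512, "zond/db/chaindata/", false, false)
//	if err != nil {
//		log.Crit("Failed to open database", "err", err)
//	}
//	defer db.Close()
//
//	// Buffer a few writes and commit them atomically.
//	batch := db.NewBatch()
//	batch.Put([]byte("key-1"), []byte("value-1"))
//	batch.Delete([]byte("key-2"))
//	if err := batch.Write(); err != nil {
//		log.Crit("Failed to commit batch", "err", err)
//	}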