github.com/klaytn/klaytn@v1.10.2/storage/database/leveldb_database.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from ethdb/database.go (2018/06/04).
// Modified and improved for the klaytn development.

package database

import (
	"fmt"
	"sync"
	"time"

	klaytnmetrics "github.com/klaytn/klaytn/metrics"

	"github.com/klaytn/klaytn/common/fdlimit"
	"github.com/klaytn/klaytn/log"
	metricutils "github.com/klaytn/klaytn/metrics/utils"
	"github.com/rcrowley/go-metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var OpenFileLimit = 64

type LevelDBCompressionType uint8

const (
	AllNoCompression LevelDBCompressionType = iota
	ReceiptOnlySnappyCompression
	StateTrieOnlyNoCompression
	AllSnappyCompression
)

const (
	minWriteBufferSize             = 2 * opt.MiB
	minBlockCacheCapacity          = 2 * minWriteBufferSize
	MinOpenFilesCacheCapacity      = 16
	minBitsPerKeyForFilter         = 10
	minFileDescriptorsForDBManager = 2048
	minFileDescriptorsForLevelDB   = 16
)

var defaultLevelDBOption = &opt.Options{
	WriteBuffer:            minWriteBufferSize,
	BlockCacheCapacity:     minBlockCacheCapacity,
	OpenFilesCacheCapacity: MinOpenFilesCacheCapacity,
	Filter:                 filter.NewBloomFilter(minBitsPerKeyForFilter),
	DisableBufferPool:      false,
	DisableSeeksCompaction: true,
}

// GetDefaultLevelDBOption returns a copy of defaultLevelDBOption,
// whose fields are set to the minimum values.
func GetDefaultLevelDBOption() *opt.Options {
	copiedOption := *defaultLevelDBOption
	return &copiedOption
}
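
// tunedLevelDBOption is a minimal usage sketch (not part of the original
// file): because GetDefaultLevelDBOption returns a copy, callers can tweak
// fields freely without mutating the package-level defaultLevelDBOption.
// The sizes below are arbitrary example values.
func tunedLevelDBOption() *opt.Options {
	ldbOption := GetDefaultLevelDBOption()
	ldbOption.WriteBuffer = 4 * opt.MiB        // raise write buffer above the 2 MiB minimum
	ldbOption.BlockCacheCapacity = 8 * opt.MiB // raise block cache above the 4 MiB minimum
	return ldbOption
}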

// GetOpenFilesLimit raises the number of allowed file handles per process
// for Klaytn and returns half of the allowance to assign to the database.
func GetOpenFilesLimit() int {
	limit, err := fdlimit.Current()
	if err != nil {
		logger.Crit("Failed to retrieve file descriptor allowance", "err", err)
	}
	if limit < minFileDescriptorsForDBManager {
		raised, err := fdlimit.Raise(minFileDescriptorsForDBManager)
		if err != nil || raised < minFileDescriptorsForDBManager {
			logger.Crit("Raised number of file descriptors is below the minimum value",
				"currFileDescriptorsLimit", limit, "minFileDescriptorsForDBManager", minFileDescriptorsForDBManager)
		}
		limit = int(raised)
	}
	return limit / 2 // Leave half for networking and other stuff
}

type levelDB struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	writeDelayCountMeter    metrics.Meter // Meter for measuring the cumulative number of write delays
	writeDelayDurationMeter metrics.Meter // Meter for measuring the cumulative duration of write delays

	aliveSnapshotsMeter metrics.Meter // Meter for measuring the number of alive snapshots
	aliveIteratorsMeter metrics.Meter // Meter for measuring the number of alive iterators

	compTimer              klaytnmetrics.HybridTimer // Timer for measuring the total time spent in database compaction
	compReadMeter          metrics.Meter             // Meter for measuring the data read during compaction
	compWriteMeter         metrics.Meter             // Meter for measuring the data written during compaction
	diskReadMeter          metrics.Meter             // Meter for measuring the effective amount of data read
	diskWriteMeter         metrics.Meter             // Meter for measuring the effective amount of data written
	blockCacheGauge        metrics.Gauge             // Gauge for measuring the current size of block cache
	openedTablesCountMeter metrics.Meter             // Meter for measuring the number of opened tables
	memCompGauge           metrics.Gauge             // Gauge for tracking the number of memory compactions
	level0CompGauge        metrics.Gauge             // Gauge for tracking the number of table compactions in level0
	nonlevel0CompGauge     metrics.Gauge             // Gauge for tracking the number of table compactions in non-level0 levels
	seekCompGauge          metrics.Gauge             // Gauge for tracking the number of table compactions triggered by seeks

	levelSizesGauge     []metrics.Gauge
	levelTablesGauge    []metrics.Gauge
	levelReadGauge      []metrics.Gauge
	levelWriteGauge     []metrics.Gauge
	levelDurationsGauge []metrics.Gauge

	perfCheck       bool
	getTimer        klaytnmetrics.HybridTimer
	putTimer        klaytnmetrics.HybridTimer
	batchWriteTimer klaytnmetrics.HybridTimer

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	prefix string     // prefix used for metrics
	logger log.Logger // Contextual logger tracking the database path
}

// getLevelDBOptions builds a leveldb option from the given DBConfig.
// Note that dbc.LevelDBCacheSize is given in MiB and is split evenly
// between the block cache and the write buffer.
func getLevelDBOptions(dbc *DBConfig) *opt.Options {
	newOption := &opt.Options{
		OpenFilesCacheCapacity:        dbc.OpenFilesLimit,
		BlockCacheCapacity:            dbc.LevelDBCacheSize / 2 * opt.MiB,
		WriteBuffer:                   dbc.LevelDBCacheSize / 2 * opt.MiB,
		Filter:                        filter.NewBloomFilter(10),
		DisableBufferPool:             !dbc.LevelDBBufferPool,
		CompactionTableSize:           2 * opt.MiB,
		CompactionTableSizeMultiplier: 1.0,
		DisableSeeksCompaction:        true,
	}

	return newOption
}

func NewLevelDB(dbc *DBConfig, entryType DBEntryType) (*levelDB, error) {
	localLogger := logger.NewWith("path", dbc.Dir)

	// Ensure we have some minimal caching and file guarantees
	if dbc.LevelDBCacheSize < 16 {
		dbc.LevelDBCacheSize = 16 // minimum cache size in MiB
	}
	if dbc.OpenFilesLimit < minFileDescriptorsForLevelDB {
		dbc.OpenFilesLimit = minFileDescriptorsForLevelDB
	}

	ldbOpts := getLevelDBOptions(dbc)
	ldbOpts.Compression = getCompressionType(dbc.LevelDBCompression, entryType)

	localLogger.Info("LevelDB configurations",
		"levelDBCacheSize", (ldbOpts.WriteBuffer+ldbOpts.BlockCacheCapacity)/opt.MiB, "openFilesLimit", ldbOpts.OpenFilesCacheCapacity,
		"useBufferPool", !ldbOpts.DisableBufferPool, "usePerfCheck", dbc.EnableDBPerfMetrics, "compressionType", ldbOpts.Compression,
		"compactionTableSize(MB)", ldbOpts.CompactionTableSize/opt.MiB, "compactionTableSizeMultiplier", ldbOpts.CompactionTableSizeMultiplier)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(dbc.Dir, ldbOpts)
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(dbc.Dir, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &levelDB{
		fn:        dbc.Dir,
		db:        db,
		logger:    localLogger,
		perfCheck: dbc.EnableDBPerfMetrics,
	}, nil
}
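
// openReceiptsDB is a minimal usage sketch (not part of the original file):
// it opens a LevelDB for the receipts entry type. Under the
// ReceiptOnlySnappyCompression policy, getCompressionType selects Snappy for
// ReceiptsDB and no compression for everything else. The directory path and
// cache size are placeholder values.
func openReceiptsDB() (*levelDB, error) {
	dbc := &DBConfig{
		Dir:                "chaindata/receipts", // hypothetical path
		LevelDBCacheSize:   128,                  // MiB, split between write buffer and block cache
		OpenFilesLimit:     GetOpenFilesLimit(),
		LevelDBBufferPool:  true,
		LevelDBCompression: ReceiptOnlySnappyCompression,
	}
	return NewLevelDB(dbc, ReceiptsDB)
}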

// setMinLevelDBOption raises the given option's values to their minimums
// if they are set below them.
func setMinLevelDBOption(ldbOption *opt.Options) {
	if ldbOption.WriteBuffer < minWriteBufferSize {
		ldbOption.WriteBuffer = minWriteBufferSize
	}

	if ldbOption.BlockCacheCapacity < minBlockCacheCapacity {
		ldbOption.BlockCacheCapacity = minBlockCacheCapacity
	}

	if ldbOption.OpenFilesCacheCapacity < MinOpenFilesCacheCapacity {
		ldbOption.OpenFilesCacheCapacity = MinOpenFilesCacheCapacity
	}
}

// getCompressionType maps a database-wide compression policy and a DB entry
// type to the compression option used for that database.
func getCompressionType(ct LevelDBCompressionType, dbEntryType DBEntryType) opt.Compression {
	if ct == AllSnappyCompression {
		return opt.SnappyCompression
	}

	if ct == AllNoCompression {
		return opt.NoCompression
	}

	if ct == ReceiptOnlySnappyCompression {
		if dbEntryType == ReceiptsDB {
			return opt.SnappyCompression
		} else {
			return opt.NoCompression
		}
	}

	if ct == StateTrieOnlyNoCompression {
		if dbEntryType == StateTrieDB {
			return opt.NoCompression
		} else {
			return opt.SnappyCompression
		}
	}
	return opt.NoCompression
}
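
// The policy-to-compression mapping implemented above can be summarized as
// follows (an illustrative table, not part of the original file):
//
//	policy                       | StateTrieDB | ReceiptsDB | other DBs
//	-----------------------------+-------------+------------+-----------
//	AllNoCompression             | None        | None       | None
//	ReceiptOnlySnappyCompression | None        | Snappy     | None
//	StateTrieOnlyNoCompression   | None        | Snappy     | Snappy
//	AllSnappyCompression         | Snappy      | Snappy     | Snappy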

// NewLevelDBWithOption explicitly receives a LevelDB option to construct a levelDB object.
func NewLevelDBWithOption(dbPath string, ldbOption *opt.Options) (*levelDB, error) {
	// TODO-Klaytn-Database Replace `NewLevelDB` with `NewLevelDBWithOption`

	localLogger := logger.NewWith("path", dbPath)

	setMinLevelDBOption(ldbOption)

	localLogger.Info("Allocated LevelDB",
		"WriteBuffer (MB)", ldbOption.WriteBuffer/opt.MiB, "OpenFilesCacheCapacity", ldbOption.OpenFilesCacheCapacity, "BlockCacheCapacity (MB)", ldbOption.BlockCacheCapacity/opt.MiB,
		"CompactionTableSize (MB)", ldbOption.CompactionTableSize/opt.MiB, "CompactionTableSizeMultiplier", ldbOption.CompactionTableSizeMultiplier, "DisableBufferPool", ldbOption.DisableBufferPool)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(dbPath, ldbOption)
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(dbPath, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &levelDB{
		fn:     dbPath,
		db:     db,
		logger: localLogger,
	}, nil
}
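
// openWithDefaultOption is a minimal usage sketch (not part of the original
// file): it combines GetDefaultLevelDBOption with NewLevelDBWithOption.
// setMinLevelDBOption inside NewLevelDBWithOption guards the copied option
// against values below the minimums. The path is a placeholder.
func openWithDefaultOption() (*levelDB, error) {
	ldbOption := GetDefaultLevelDBOption()
	ldbOption.Compression = opt.SnappyCompression
	return NewLevelDBWithOption("chaindata/misc", ldbOption) // hypothetical path
}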

// Type returns LevelDB, the type of this database implementation.
func (db *levelDB) Type() DBType {
	return LevelDB
}

// Path returns the path to the database directory.
func (db *levelDB) Path() string {
	return db.fn
}

// Put inserts the given key/value pair into the database.
func (db *levelDB) Put(key []byte, value []byte) error {
	// Generate the data to write to disk, update the meter and write
	// value = rle.Compress(value)
	if db.perfCheck {
		start := time.Now()
		err := db.put(key, value)
		db.putTimer.Update(time.Since(start))
		return err
	}
	return db.put(key, value)
}

func (db *levelDB) put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Has returns true if the given key is present in the database.
func (db *levelDB) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get returns the value for the given key if it's present.
func (db *levelDB) Get(key []byte) ([]byte, error) {
	if db.perfCheck {
		start := time.Now()
		val, err := db.get(key)
		db.getTimer.Update(time.Since(start))
		return val, err
	}
	return db.get(key)
	// return rle.Decompress(dat)
}

func (db *levelDB) get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		if err == leveldb.ErrNotFound {
			return nil, dataNotFoundErr
		}
		return nil, err
	}
	return dat, nil
}

// Delete removes the key from the database.
func (db *levelDB) Delete(key []byte) error {
	// Execute the actual operation
	return db.db.Delete(key, nil)
}

// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (db *levelDB) NewIterator(prefix []byte, start []byte) Iterator {
	return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
}
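
// countWithPrefix is a minimal usage sketch (not part of the original file),
// assuming the package's Iterator exposes Next, Error and Release as in
// go-ethereum's ethdb: it walks every key that begins with the given prefix,
// starting at the given position within that prefix.
func countWithPrefix(db *levelDB, prefix, start []byte) (int, error) {
	it := db.NewIterator(prefix, start)
	defer it.Release() // always release the underlying leveldb iterator

	n := 0
	for it.Next() {
		n++
	}
	return n, it.Error() // surface any iteration error after exhaustion
}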

func (db *levelDB) Close() {
	// Stop the metrics collection to avoid internal database races
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.logger.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	err := db.db.Close()
	if err == nil {
		db.logger.Info("Database closed")
	} else {
		db.logger.Error("Failed to close database", "err", err)
	}
}

// LDB returns the underlying leveldb.DB handle.
func (db *levelDB) LDB() *leveldb.DB {
	return db.db
}

// Meter configures the database metrics collectors under the given prefix
// and, if metrics are enabled, starts a goroutine that periodically reports
// internal leveldb statistics.
func (db *levelDB) Meter(prefix string) {
	db.prefix = prefix

	// Initialize all the metrics collectors at the requested prefix
	db.writeDelayCountMeter = metrics.NewRegisteredMeter(prefix+"writedelay/count", nil)
	db.writeDelayDurationMeter = metrics.NewRegisteredMeter(prefix+"writedelay/duration", nil)
	db.aliveSnapshotsMeter = metrics.NewRegisteredMeter(prefix+"snapshots", nil)
	db.aliveIteratorsMeter = metrics.NewRegisteredMeter(prefix+"iterators", nil)
	db.compTimer = klaytnmetrics.NewRegisteredHybridTimer(prefix+"compaction/time", nil)
	db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compaction/read", nil)
	db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compaction/write", nil)
	db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
	db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
	db.blockCacheGauge = metrics.NewRegisteredGauge(prefix+"blockcache", nil)

	db.openedTablesCountMeter = metrics.NewRegisteredMeter(prefix+"opendedtables", nil)

	db.getTimer = klaytnmetrics.NewRegisteredHybridTimer(prefix+"get/time", nil)
	db.putTimer = klaytnmetrics.NewRegisteredHybridTimer(prefix+"put/time", nil)
	db.batchWriteTimer = klaytnmetrics.NewRegisteredHybridTimer(prefix+"batchwrite/time", nil)

	db.memCompGauge = metrics.NewRegisteredGauge(prefix+"compact/memory", nil)
	db.level0CompGauge = metrics.NewRegisteredGauge(prefix+"compact/level0", nil)
	db.nonlevel0CompGauge = metrics.NewRegisteredGauge(prefix+"compact/nonlevel0", nil)
	db.seekCompGauge = metrics.NewRegisteredGauge(prefix+"compact/seek", nil)

	// Short circuit metering if the metrics system is disabled
	// The meters above are initialized as NilMeter if metricutils.Enabled == false
	if !metricutils.Enabled {
		return
	}

	// Create a quit channel for the periodic collector and run it
	db.quitLock.Lock()
	db.quitChan = make(chan chan error)
	db.quitLock.Unlock()

	go db.meter(3 * time.Second)
}
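
// meterStateDB is a minimal usage sketch (not part of the original file):
// it registers this database's metrics under a hypothetical prefix. Metric
// names are formed by plain string concatenation above, so the prefix
// should end with "/".
func meterStateDB(db *levelDB) {
	db.Meter("klay/db/statedb/") // e.g. yields "klay/db/statedb/compaction/time"
}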

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the iostats look like (currently):
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *levelDB) meter(refresh time.Duration) {
	s := new(leveldb.DBStats)

	// Write delay related stats
	var prevWriteDelayCount int32
	var prevWriteDelayDuration time.Duration

	// Alive snapshots/iterators
	var prevAliveSnapshots, prevAliveIterators int32

	// Compaction related stats
	var prevCompRead, prevCompWrite int64
	var prevCompTime time.Duration

	// IO related stats
	var prevRead, prevWrite uint64

	var (
		errc chan error
		merr error
	)

	// Keep collecting stats unless an error occurs
hasError:
	for {
		merr = db.db.Stats(s)
		if merr != nil {
			break
		}
		// Write delay related stats
		db.writeDelayCountMeter.Mark(int64(s.WriteDelayCount - prevWriteDelayCount))
		db.writeDelayDurationMeter.Mark(int64(s.WriteDelayDuration - prevWriteDelayDuration))
		prevWriteDelayCount, prevWriteDelayDuration = s.WriteDelayCount, s.WriteDelayDuration

		// Alive snapshots/iterators
		db.aliveSnapshotsMeter.Mark(int64(s.AliveSnapshots - prevAliveSnapshots))
		db.aliveIteratorsMeter.Mark(int64(s.AliveIterators - prevAliveIterators))
		prevAliveSnapshots, prevAliveIterators = s.AliveSnapshots, s.AliveIterators

		// Compaction related stats
		var currCompRead, currCompWrite int64
		var currCompTime time.Duration
		for i := 0; i < len(s.LevelDurations); i++ {
			currCompTime += s.LevelDurations[i]
			currCompRead += s.LevelRead[i]
			currCompWrite += s.LevelWrite[i]

			db.updateLevelStats(s, i)
		}
		db.compTimer.Update(currCompTime - prevCompTime)
		db.compReadMeter.Mark(currCompRead - prevCompRead)
		db.compWriteMeter.Mark(currCompWrite - prevCompWrite)
		prevCompTime, prevCompRead, prevCompWrite = currCompTime, currCompRead, currCompWrite

		// IO related stats
		currRead, currWrite := s.IORead, s.IOWrite
		db.diskReadMeter.Mark(int64(currRead - prevRead))
		db.diskWriteMeter.Mark(int64(currWrite - prevWrite))
		prevRead, prevWrite = currRead, currWrite

		// BlockCache/OpenedTables related stats
		db.blockCacheGauge.Update(int64(s.BlockCacheSize))
		db.openedTablesCountMeter.Mark(int64(s.OpenedTablesCount))

		// Compaction related stats
		db.memCompGauge.Update(int64(s.MemComp))
		db.level0CompGauge.Update(int64(s.Level0Comp))
		db.nonlevel0CompGauge.Update(int64(s.NonLevel0Comp))
		db.seekCompGauge.Update(int64(s.SeekComp))

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requesting, stop hammering the database
			break hasError
		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}

// updateLevelStats collects the level-wise stats.
func (db *levelDB) updateLevelStats(s *leveldb.DBStats, lv int) {
	// Dynamically create new metrics for a level seen for the first time
	if len(db.levelSizesGauge) <= lv {
		prefix := db.prefix + fmt.Sprintf("level%v/", lv)
		db.levelSizesGauge = append(db.levelSizesGauge, metrics.NewRegisteredGauge(prefix+"size", nil))
		db.levelTablesGauge = append(db.levelTablesGauge, metrics.NewRegisteredGauge(prefix+"tables", nil))
		db.levelReadGauge = append(db.levelReadGauge, metrics.NewRegisteredGauge(prefix+"read", nil))
		db.levelWriteGauge = append(db.levelWriteGauge, metrics.NewRegisteredGauge(prefix+"write", nil))
		db.levelDurationsGauge = append(db.levelDurationsGauge, metrics.NewRegisteredGauge(prefix+"duration", nil))
	}

	db.levelSizesGauge[lv].Update(s.LevelSizes[lv])
	db.levelTablesGauge[lv].Update(int64(s.LevelTablesCounts[lv]))
	db.levelReadGauge[lv].Update(s.LevelRead[lv])
	db.levelWriteGauge[lv].Update(s.LevelWrite[lv])
	db.levelDurationsGauge[lv].Update(int64(s.LevelDurations[lv]))
}

// NewBatch creates a write-only leveldb batch whose changes are committed
// to the host database when Write is called.
func (db *levelDB) NewBatch() Batch {
	return &ldbBatch{b: new(leveldb.Batch), ldb: db}
}

// ldbBatch is a write-only leveldb batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type ldbBatch struct {
	b    *leveldb.Batch
	ldb  *levelDB
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *ldbBatch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *ldbBatch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}

// Write flushes any accumulated data to disk.
func (b *ldbBatch) Write() error {
	if b.ldb.perfCheck {
		start := time.Now()
		err := b.write()
		b.ldb.batchWriteTimer.Update(time.Since(start))
		return err
	}
	return b.write()
}

func (b *ldbBatch) write() error {
	return b.ldb.db.Write(b.b, nil)
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *ldbBatch) ValueSize() int {
	return b.size
}

// Reset resets the batch for reuse.
func (b *ldbBatch) Reset() {
	b.b.Reset()
	b.size = 0
}
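
// batchedPuts is a minimal usage sketch (not part of the original file):
// it accumulates writes in a batch and flushes whenever the queued value
// bytes exceed an arbitrary threshold, reusing the batch via Reset.
func batchedPuts(db *levelDB, kvs map[string][]byte) error {
	const flushThreshold = 1 * opt.MiB // placeholder threshold

	batch := db.NewBatch()
	for k, v := range kvs {
		if err := batch.Put([]byte(k), v); err != nil {
			return err
		}
		if batch.ValueSize() >= flushThreshold {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	return batch.Write()
}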

// bytesPrefixRange returns a key range that satisfies
// - the given prefix, and
// - the given seek position
func bytesPrefixRange(prefix, start []byte) *util.Range {
	r := util.BytesPrefix(prefix)
	r.Start = append(r.Start, start...)
	return r
}
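
// A worked example (illustrative, not part of the original file):
//
//	r := bytesPrefixRange([]byte("dat"), []byte("a"))
//	// r.Start == []byte("data"), r.Limit == []byte("dau"),
//	// i.e. all keys with prefix "dat" that sort at or after "data".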

// Replay replays the batch contents.
func (b *ldbBatch) Replay(w KeyValueWriter) error {
	return b.b.Replay(&replayer{writer: w})
}

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
	writer  KeyValueWriter
	failure error
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}
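
// copyBatch is a minimal usage sketch (not part of the original file): it
// replays the contents of a pending batch into another KeyValueWriter, for
// example to mirror queued writes into a second database before committing.
func copyBatch(src *ldbBatch, dst KeyValueWriter) error {
	return src.Replay(dst)
}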