github.com/gilgames000/kcc-geth@v1.0.6/ethdb/leveldb/leveldb.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !js

// Package leveldb implements the key-value database layer based on LevelDB.
package leveldb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	// degradationWarnInterval specifies how often a warning should be printed if
	// the leveldb database cannot keep up with the requested writes.
	degradationWarnInterval = time.Minute

	// minCache is the minimum amount of memory in megabytes to allocate to leveldb
	// read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve leveldb database
	// compaction, io and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)

// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace in
// binary-alphabetical order.
type Database struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter      metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter      metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter     metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter   metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter    metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskSizeGauge      metrics.Gauge // Gauge for tracking the size of all the levels in the database
	diskReadMeter      metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter     metrics.Meter // Meter for measuring the effective amount of data written
	memCompGauge       metrics.Gauge // Gauge for tracking the number of memory compactions
	level0CompGauge    metrics.Gauge // Gauge for tracking the number of table compactions in level0
	nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compactions in non-level0 levels
	seekCompGauge      metrics.Gauge // Gauge for tracking the number of table compactions caused by read opt

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
	return NewCustom(file, namespace, func(options *opt.Options) {
		// Ensure we have some minimal caching and file guarantees
		if cache < minCache {
			cache = minCache
		}
		if handles < minHandles {
			handles = minHandles
		}
		// Set default options
		options.OpenFilesCacheCapacity = handles
		options.BlockCacheCapacity = cache / 2 * opt.MiB
		options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally
	})
}
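
// Illustrative usage sketch (not part of the original source): opening a database
// with New and the effect of the defaults above. The path and namespace below are
// hypothetical. With cache=64 the block cache gets 32 MiB and each of the two
// write buffers gets 16 MiB; values below 16 are clamped to the minimums.
//
//	db, err := New("/tmp/example-chaindata", 64, 64, "eth/db/chaindata/")
//	if err != nil {
//		return err
//	}
//	defer db.Close()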

// NewCustom returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
// The customize function allows the caller to modify the leveldb options.
func NewCustom(file string, namespace string, customize func(options *opt.Options)) (*Database, error) {
	options := configureOptions(customize)
	logger := log.New("database", file)
	usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2
	logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()}
	if options.ReadOnly {
		logCtx = append(logCtx, "readonly", "true")
	}
	logger.Info("Allocated cache and file handles", logCtx...)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, options)
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	if err != nil {
		return nil, err
	}
	// Assemble the wrapper with all the registered metrics
	ldb := &Database{
		fn:       file,
		db:       db,
		log:      logger,
		quitChan: make(chan chan error),
	}
	ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	ldb.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
	ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
	ldb.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
	ldb.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
	ldb.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
	ldb.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)

	// Start up the metrics gathering and return
	go ldb.meter(metricsGatheringInterval)
	return ldb, nil
}
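
// Illustrative sketch (not part of the original source): NewCustom lets the caller
// tweak any leveldb option before the database is opened. The example below opens
// a hypothetical path read-only with a larger open-files cache.
//
//	db, err := NewCustom("/tmp/example-chaindata", "eth/db/chaindata/", func(options *opt.Options) {
//		options.ReadOnly = true
//		options.OpenFilesCacheCapacity = 1024
//	})
//	if err != nil {
//		return err
//	}
//	defer db.Close()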

// configureOptions sets some default options, then runs the provided setter.
func configureOptions(customizeFn func(*opt.Options)) *opt.Options {
	// Set default options
	options := &opt.Options{
		Filter:                 filter.NewBloomFilter(10),
		DisableSeeksCompaction: true,
	}
	// Allow caller to make custom modifications to the options
	if customizeFn != nil {
		customizeFn(options)
	}
	return options
}

// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (db *Database) Close() error {
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	return db.db.Close()
}

// Has retrieves if a key is present in the key-value store.
func (db *Database) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Delete removes the key from the key-value store.
func (db *Database) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}
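
// Illustrative sketch (not part of the original source): the accessors above mirror
// the ethdb.KeyValueStore interface. A missing key is reported by Get as an error
// (leveldb.ErrNotFound from goleveldb) rather than a nil slice.
//
//	if err := db.Put([]byte("foo"), []byte("bar")); err != nil {
//		return err
//	}
//	val, err := db.Get([]byte("foo")) // err is non-nil if the key is absent
//	if err != nil {
//		return err
//	}
//	_ = val
//	if err := db.Delete([]byte("foo")); err != nil {
//		return err
//	}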

// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (db *Database) NewBatch() ethdb.Batch {
	return &batch{
		db: db.db,
		b:  new(leveldb.Batch),
	}
}
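
// Illustrative sketch (not part of the original source): a batch buffers writes in
// memory and only touches the database when Write is called, so related updates
// land together.
//
//	batch := db.NewBatch()
//	_ = batch.Put([]byte("k1"), []byte("v1"))
//	_ = batch.Delete([]byte("k2"))
//	if err := batch.Write(); err != nil {
//		return err
//	}
//	batch.Reset() // the batch can be reused afterwards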

// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
	return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
}
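
// Illustrative sketch (not part of the original source): iterating over all keys
// that share a hypothetical prefix, starting from a given offset within that
// prefix. Release must be called to free the iterator's resources.
//
//	it := db.NewIterator([]byte("acct-"), []byte("0x01"))
//	defer it.Release()
//	for it.Next() {
//		key, value := it.Key(), it.Value()
//		_ = key
//		_ = value
//	}
//	if err := it.Error(); err != nil {
//		return err
//	}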

// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
	return db.db.GetProperty(property)
}
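
// Illustrative sketch (not part of the original source): Stat forwards directly to
// goleveldb's GetProperty, so the usual leveldb property names apply, e.g. the
// stats table that meter parses below.
//
//	stats, err := db.Stat("leveldb.stats")
//	if err != nil {
//		return err
//	}
//	fmt.Println(stats)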

// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil, the
// entire data store will be compacted.
func (db *Database) Compact(start []byte, limit []byte) error {
	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}
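
// Illustrative sketch (not part of the original source): passing nil for both
// bounds compacts the whole key space, which can be useful after large deletions.
//
//	if err := db.Compact(nil, nil); err != nil {
//		return err
//	}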

// Path returns the path to the database directory.
func (db *Database) Path() string {
	return db.fn
}

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a LevelDB stats table currently looks like:
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay currently looks like:
// DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats currently look like:
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *Database) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 4)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWritePaused time.Time
	)

	var (
		errc chan error
		merr error
	)

	timer := time.NewTimer(refresh)
	defer timer.Stop()

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			merr = errors.New("compaction table not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the compaction table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[2:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.diskSizeGauge != nil {
			db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024))
		}
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024))
		}
		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			db.log.Error("Write delay statistic not found")
			merr = err
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
		}
		// If a warning that the db is performing compaction has been displayed, any
		// subsequent warnings will be withheld for one minute so as not to overwhelm
		// the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
			db.log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}
		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			merr = err
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			merr = err
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		compCount, err := db.db.GetProperty("leveldb.compcount")
		if err != nil {
			db.log.Error("Failed to read database compaction count statistic", "err", err)
			merr = err
			continue
		}

		var (
			memComp       uint32
			level0Comp    uint32
			nonLevel0Comp uint32
			seekComp      uint32
		)
		if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil {
			db.log.Error("Compaction count statistic not found")
			merr = err
			continue
		}
		db.memCompGauge.Update(int64(memComp))
		db.level0CompGauge.Update(int64(level0Comp))
		db.nonlevel0CompGauge.Update(int64(nonLevel0Comp))
		db.seekCompGauge.Update(int64(seekComp))

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requesting, stop hammering the database
		case <-timer.C:
			timer.Reset(refresh)
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}
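
// Illustrative sketch (not part of the original source): the write-delay property
// is parsed with fmt.Sscanf exactly as in meter above. Given the sample line from
// the doc comment (the exact text is produced by goleveldb), the fields come out
// as follows.
//
//	var (
//		delayN   int64
//		delayDur string
//		paused   bool
//	)
//	line := "DelayN:5 Delay:406.604657ms Paused:false"
//	n, err := fmt.Sscanf(line, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDur, &paused)
//	// n == 3, err == nil, delayN == 5, delayDur == "406.604657ms", paused == false
//	d, _ := time.ParseDuration(delayDur) // 406.604657ms as a time.Duration
//	_ = d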

// batch is a write-only leveldb batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	return b.db.Write(b.b, nil)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w ethdb.KeyValueWriter) error {
	return b.b.Replay(&replayer{writer: w})
}
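
// Illustrative sketch (not part of the original source): Replay re-applies the
// buffered operations to any ethdb.KeyValueWriter, e.g. to copy a batch built
// against one database into another. src and dst are hypothetical handles.
//
//	batch := src.NewBatch()
//	_ = batch.Put([]byte("k"), []byte("v"))
//	if err := batch.Replay(dst); err != nil { // dst is another ethdb.KeyValueWriter
//		return err
//	}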

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
	writer  ethdb.KeyValueWriter
	failure error
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}

// bytesPrefixRange returns a key range that satisfies
// - the given prefix, and
// - the given seek position
func bytesPrefixRange(prefix, start []byte) *util.Range {
	r := util.BytesPrefix(prefix)
	r.Start = append(r.Start, start...)
	return r
}
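
// Illustrative sketch (not part of the original source): util.BytesPrefix computes
// the smallest range covering every key with the given prefix, and the start
// offset is then appended to its lower bound. For a hypothetical prefix and start:
//
//	r := bytesPrefixRange([]byte("abc"), []byte("1"))
//	// r.Start == []byte("abc1"), iteration begins at the prefix plus the start key
//	// r.Limit == []byte("abd"), the first key just past every "abc"-prefixed key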