github.com/zhiqiangxu/go-ethereum@v1.9.16-0.20210824055606-be91cfdebc48/ethdb/leveldb/leveldb.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !js

// Package leveldb implements the key-value database layer based on LevelDB.
package leveldb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
	"github.com/zhiqiangxu/go-ethereum/common"
	"github.com/zhiqiangxu/go-ethereum/ethdb"
	"github.com/zhiqiangxu/go-ethereum/log"
	"github.com/zhiqiangxu/go-ethereum/metrics"
)

const (
	// degradationWarnInterval specifies how often a warning should be printed if the
	// leveldb database cannot keep up with requested writes.
	degradationWarnInterval = time.Minute

	// minCache is the minimum amount of memory in megabytes to allocate to leveldb
	// read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve leveldb database
	// compaction, io and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)

// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace in
// binary-alphabetical order.
type Database struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter      metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter      metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter     metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter   metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter    metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskSizeGauge      metrics.Gauge // Gauge for tracking the size of all the levels in the database
	diskReadMeter      metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter     metrics.Meter // Meter for measuring the effective amount of data written
	memCompGauge       metrics.Gauge // Gauge for tracking the number of memory compactions
	level0CompGauge    metrics.Gauge // Gauge for tracking the number of table compactions in level0
	nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compactions in non-level0 levels
	seekCompGauge      metrics.Gauge // Gauge for tracking the number of table compactions caused by read opt

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < minCache {
		cache = minCache
	}
	if handles < minHandles {
		handles = minHandles
	}
	logger := log.New("database", file)
	logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
		DisableSeeksCompaction: true,
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	if err != nil {
		return nil, err
	}
	// Assemble the wrapper with all the registered metrics
	ldb := &Database{
		fn:       file,
		db:       db,
		log:      logger,
		quitChan: make(chan chan error),
	}
	ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	ldb.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
	ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
	ldb.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
	ldb.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
	ldb.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
	ldb.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)

	// Start up the metrics gathering and return
	go ldb.meter(metricsGatheringInterval)
	return ldb, nil
}
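
// The sketch below is illustrative only and not part of the upstream file: it
// shows how a caller would typically open the store through New, write and
// read a single key, and close the handle so the metrics goroutine shuts down.
// The path, namespace and key names are invented for the example.
func exampleBasicUsage() error {
	// Open (or create) a database; cache/handles below the minimums are
	// silently bumped up to minCache/minHandles by New.
	db, err := New("/tmp/example-ldb", minCache, minHandles, "example/db/")
	if err != nil {
		return err
	}
	// Close stops the metrics collection and flushes pending data to disk.
	defer db.Close()

	// Basic single-key write and read.
	if err := db.Put([]byte("answer"), []byte("42")); err != nil {
		return err
	}
	val, err := db.Get([]byte("answer"))
	if err != nil {
		return err
	}
	fmt.Printf("answer = %s\n", val)
	return nil
}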

// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (db *Database) Close() error {
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	return db.db.Close()
}

// Has reports whether a key is present in the key-value store.
func (db *Database) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Delete removes the key from the key-value store.
func (db *Database) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (db *Database) NewBatch() ethdb.Batch {
	return &batch{
		db: db.db,
		b:  new(leveldb.Batch),
	}
}

// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
	return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
}
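
// Another illustrative sketch, not part of the upstream file: batching several
// writes into one atomic leveldb write and then iterating over a key prefix.
// The "acct-" prefix and the values are invented for the example.
func exampleBatchAndIterate(db *Database) error {
	// Buffer a handful of writes and flush them in a single batch write.
	wb := db.NewBatch()
	for i := 0; i < 3; i++ {
		key := []byte(fmt.Sprintf("acct-%d", i))
		if err := wb.Put(key, []byte("balance")); err != nil {
			return err
		}
	}
	if err := wb.Write(); err != nil {
		return err
	}
	// Walk every key starting with "acct-" in binary-alphabetical order.
	it := db.NewIterator([]byte("acct-"), nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s => %s\n", it.Key(), it.Value())
	}
	return it.Error()
}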

// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
	return db.db.GetProperty(property)
}

// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil, the
// entire data store will be compacted.
func (db *Database) Compact(start []byte, limit []byte) error {
	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}
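
// Illustrative sketch, not part of the upstream file: compacting the whole
// store versus just the keys under a prefix. util.BytesPrefix turns a prefix
// into the half-open [Start, Limit) range that Compact expects.
func exampleCompact(db *Database) error {
	// Compact everything: nil start and nil limit cover the full keyspace.
	if err := db.Compact(nil, nil); err != nil {
		return err
	}
	// Compact only keys beginning with "acct-".
	r := util.BytesPrefix([]byte("acct-"))
	return db.Compact(r.Start, r.Limit)
}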

// Path returns the path to the database directory.
func (db *Database) Path() string {
	return db.fn
}

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a LevelDB stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
// DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats look like (currently):
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *Database) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 4)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWritePaused time.Time
	)

	var (
		errc chan error
		merr error
	)

	timer := time.NewTimer(refresh)
	defer timer.Stop()

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			merr = errors.New("compaction table not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[2:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.diskSizeGauge != nil {
			db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024))
		}
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024))
		}
		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			db.log.Error("Write delay statistic not found")
			merr = err
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
		}
		// If a warning that db is performing compaction has been displayed, any subsequent
		// warnings will be withheld for one minute not to overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
			db.log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}
		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			merr = err
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			merr = err
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		compCount, err := db.db.GetProperty("leveldb.compcount")
		if err != nil {
			db.log.Error("Failed to read database compaction count", "err", err)
			merr = err
			continue
		}

		var (
			memComp       uint32
			level0Comp    uint32
			nonLevel0Comp uint32
			seekComp      uint32
		)
		if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil {
			db.log.Error("Compaction count statistic not found")
			merr = err
			continue
		}
		db.memCompGauge.Update(int64(memComp))
		db.level0CompGauge.Update(int64(level0Comp))
		db.nonlevel0CompGauge.Update(int64(nonLevel0Comp))
		db.seekCompGauge.Update(int64(seekComp))

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requesting, stop hammering the database
		case <-timer.C:
			timer.Reset(refresh)
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}

// batch is a write-only leveldb batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	return b.db.Write(b.b, nil)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w ethdb.KeyValueWriter) error {
	return b.b.Replay(&replayer{writer: w})
}

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
	writer  ethdb.KeyValueWriter
	failure error
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}
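
// Illustrative sketch, not part of the upstream file: Replay re-applies the
// buffered operations of a batch onto any ethdb.KeyValueWriter, for example a
// second Database, without flushing them to the batch's own host database.
// The key and value below are invented for the example.
func exampleReplay(src *Database, dst *Database) error {
	wb := src.NewBatch()
	if err := wb.Put([]byte("copied-key"), []byte("copied-value")); err != nil {
		return err
	}
	// Instead of calling Write (which targets src), replay the queued
	// operations onto dst via the replayer wrapper.
	return wb.Replay(dst)
}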

// bytesPrefixRange returns a key range that satisfies
// - the given prefix, and
// - the given seek position
func bytesPrefixRange(prefix, start []byte) *util.Range {
	r := util.BytesPrefix(prefix)
	r.Start = append(r.Start, start...)
	return r
}
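
// Illustrative sketch, not part of the upstream file: how the prefix and the
// seek position combine. For prefix "abc" and start "d", util.BytesPrefix
// yields Start="abc", Limit="abd"; appending the seek position moves the lower
// bound to "abcd", giving the half-open range ["abcd", "abd"). Iteration thus
// begins at "abcd" (or the next existing key after it) and stops before the
// first key that no longer carries the "abc" prefix.
func exampleBytesPrefixRange() {
	r := bytesPrefixRange([]byte("abc"), []byte("d"))
	fmt.Printf("start=%q limit=%q\n", r.Start, r.Limit) // start="abcd" limit="abd"
}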