github.com/core-coin/go-core/v2@v2.1.9/xcbdb/leveldb/leveldb.go

// Copyright 2018 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

//go:build !js
// +build !js

// Package leveldb implements the key-value database layer based on LevelDB.
package leveldb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"

	"github.com/core-coin/go-core/v2/xcbdb"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/metrics"
)

const (
	// degradationWarnInterval specifies how often a warning should be printed
	// if the leveldb database cannot keep up with the requested writes.
	degradationWarnInterval = time.Minute

	// minCache is the minimum amount of memory in megabytes to allocate to leveldb
	// read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve leveldb database
	// compaction, IO and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)

// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace in
// binary-alphabetical order.
type Database struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter      metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter      metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter     metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter   metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter    metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskSizeGauge      metrics.Gauge // Gauge for tracking the size of all the levels in the database
	diskReadMeter      metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter     metrics.Meter // Meter for measuring the effective amount of data written
	memCompGauge       metrics.Gauge // Gauge for tracking the number of memory compactions
	level0CompGauge    metrics.Gauge // Gauge for tracking the number of table compactions in level0
	nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compactions in non-level0 levels
	seekCompGauge      metrics.Gauge // Gauge for tracking the number of table compactions caused by seeks

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}
// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < minCache {
		cache = minCache
	}
	if handles < minHandles {
		handles = minHandles
	}
	logger := log.New("database", file)
	logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
		DisableSeeksCompaction: true,
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	if err != nil {
		return nil, err
	}
	// Assemble the wrapper with all the registered metrics
	ldb := &Database{
		fn:       file,
		db:       db,
		log:      logger,
		quitChan: make(chan chan error),
	}
	ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	ldb.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
	ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
	ldb.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
	ldb.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
	ldb.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
	ldb.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)

	// Start up the metrics gathering and return
	go ldb.meter(metricsGatheringInterval)
	return ldb, nil
}
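
// The sketch below is editorial, not part of the original file: a minimal,
// hedged illustration of how New is typically combined with Put, Get and
// Close. The path and the metrics namespace are hypothetical values chosen
// for the example.
func exampleOpenAndUse() error {
	// Open (or create) a database with the minimum cache and handle counts.
	db, err := New("/tmp/example-leveldb", minCache, minHandles, "example/db/")
	if err != nil {
		return err
	}
	defer db.Close()

	// Store a single key-value pair and read it back.
	if err := db.Put([]byte("answer"), []byte("42")); err != nil {
		return err
	}
	val, err := db.Get([]byte("answer"))
	if err != nil {
		return err
	}
	fmt.Printf("answer = %s\n", val)
	return nil
}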

// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (db *Database) Close() error {
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	return db.db.Close()
}

// Has reports whether a key is present in the key-value store.
func (db *Database) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Delete removes the key from the key-value store.
func (db *Database) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (db *Database) NewBatch() xcbdb.Batch {
	return &batch{
		db: db.db,
		b:  new(leveldb.Batch),
	}
}
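
// Editorial sketch, not part of the original file: buffering several writes
// in a batch and committing them with a single Write, assuming the xcbdb.Batch
// interface exposes the Put/Write/Reset methods implemented further below.
// Keys and values are hypothetical.
func exampleBatchedWrites(db *Database) error {
	b := db.NewBatch()
	if err := b.Put([]byte("k1"), []byte("v1")); err != nil {
		return err
	}
	if err := b.Put([]byte("k2"), []byte("v2")); err != nil {
		return err
	}
	// Write flushes both entries to the host database in one leveldb write;
	// Reset makes the batch reusable afterwards.
	if err := b.Write(); err != nil {
		return err
	}
	b.Reset()
	return nil
}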

// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (db *Database) NewIterator(prefix []byte, start []byte) xcbdb.Iterator {
	return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
}
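
// Editorial sketch, not part of the original file: scanning every key that
// carries a given prefix. It assumes xcbdb.Iterator follows the usual
// Next/Key/Value/Release contract satisfied by the goleveldb iterator
// returned above. The prefix is hypothetical.
func examplePrefixScan(db *Database) {
	it := db.NewIterator([]byte("acc-"), nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s => %s\n", it.Key(), it.Value())
	}
}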

// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
	return db.db.GetProperty(property)
}

// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil, the
// entire data store is compacted.
func (db *Database) Compact(start []byte, limit []byte) error {
	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}
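
// Editorial sketch, not part of the original file: passing nil bounds
// compacts the entire data store, as the comment above describes.
func exampleFullCompaction(db *Database) error {
	return db.Compact(nil, nil)
}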

// Path returns the path to the database directory.
func (db *Database) Path() string {
	return db.fn
}

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a LevelDB stats table looks like (currently):
//
//	Compactions
//	 Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//	-------+------------+---------------+---------------+---------------+---------------
//	   0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//	   1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//	   2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//	   3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
//
//	DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats look like (currently):
//
//	Read(MB):3895.04860 Write(MB):3654.64712
func (db *Database) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 4)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWritePaused time.Time
	)

	var (
		errc chan error
		merr error
	)

	timer := time.NewTimer(refresh)
	defer timer.Stop()

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			merr = errors.New("compaction table not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[2:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.diskSizeGauge != nil {
			db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024))
		}
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024))
		}
		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			db.log.Error("Write delay statistic not found")
			merr = err
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
		}
		// If a warning that the db is performing compaction has been displayed, any
		// subsequent warnings are withheld for one minute so as not to overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
			db.log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}
		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			merr = err
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			merr = err
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		compCount, err := db.db.GetProperty("leveldb.compcount")
		if err != nil {
			db.log.Error("Failed to read database compaction count", "err", err)
			merr = err
			continue
		}

		var (
			memComp       uint32
			level0Comp    uint32
			nonLevel0Comp uint32
			seekComp      uint32
		)
		if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil {
			db.log.Error("Compaction count statistic not found")
			merr = err
			continue
		}
		db.memCompGauge.Update(int64(memComp))
		db.level0CompGauge.Update(int64(level0Comp))
		db.nonlevel0CompGauge.Update(int64(nonLevel0Comp))
		db.seekCompGauge.Update(int64(seekComp))

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requested, stop hammering the database
		case <-timer.C:
			timer.Reset(refresh)
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}

// batch is a write-only leveldb batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	return b.db.Write(b.b, nil)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w xcbdb.KeyValueWriter) error {
	return b.b.Replay(&replayer{writer: w})
}
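
// Editorial sketch, not part of the original file: replaying a populated
// batch into any other KeyValueWriter, e.g. to copy buffered writes into a
// second store. Both arguments are hypothetical.
func exampleReplayInto(b xcbdb.Batch, dst xcbdb.KeyValueWriter) error {
	// Replay re-applies every Put and Delete recorded in the batch to dst.
	return b.Replay(dst)
}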

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
	writer  xcbdb.KeyValueWriter
	failure error
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}

// bytesPrefixRange returns a key range that satisfies
// - the given prefix, and
// - the given seek position
func bytesPrefixRange(prefix, start []byte) *util.Range {
	r := util.BytesPrefix(prefix)
	r.Start = append(r.Start, start...)
	return r
}
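
// Editorial sketch, not part of the original file: a worked example of the
// range produced above. util.BytesPrefix yields the half-open range
// [prefix, limit), where the limit is the prefix with its last byte
// incremented; appending the seek position then narrows the lower bound.
func examplePrefixRange() {
	r := bytesPrefixRange([]byte("acc-"), []byte{0x01})
	fmt.Printf("start=%q limit=%q\n", r.Start, r.Limit)
	// Prints: start="acc-\x01" limit="acc."
}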