github.com/aquanetwork/aquachain@v1.7.8/aquadb/database.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package aquadb
    18  
    19  import (
    20  	"fmt"
    21  	"strconv"
    22  	"strings"
    23  	"sync"
    24  	"time"
    25  
    26  	"github.com/syndtr/goleveldb/leveldb"
    27  	"github.com/syndtr/goleveldb/leveldb/errors"
    28  	"github.com/syndtr/goleveldb/leveldb/filter"
    29  	"github.com/syndtr/goleveldb/leveldb/iterator"
    30  	"github.com/syndtr/goleveldb/leveldb/opt"
    31  	"github.com/syndtr/goleveldb/leveldb/util"
    32  	"gitlab.com/aquachain/aquachain/common/log"
    33  	"gitlab.com/aquachain/aquachain/common/metrics"
    34  )
    35  
    36  const (
    37  	writePauseWarningThrottler = 1 * time.Minute
    38  )
    39  
    40  var OpenFileLimit = 64
    41  
    42  type LDBDatabase struct {
    43  	fn string      // filename for reporting
    44  	db *leveldb.DB // LevelDB instance
    45  
    46  	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
    47  	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
    48  	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
    49  	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
    50  	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
    51  	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
    52  	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written
    53  
    54  	quitLock sync.Mutex      // Mutex protecting the quit channel access
    55  	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
    56  
    57  	log log.Logger // Contextual logger tracking the database path
    58  }
    59  
    60  // NewLDBDatabase opens (or creates) a LevelDB database at the given path and returns a wrapped object.
    61  func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
    62  	logger := log.New("database", file)
    63  
    64  	// Ensure we have some minimal caching and file guarantees
    65  	if cache < 16 {
    66  		cache = 16
    67  	}
    68  	if handles < 16 {
    69  		handles = 16
    70  	}
    71  	logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)
    72  
    73  	// Open the db and recover any potential corruptions
    74  	db, err := leveldb.OpenFile(file, &opt.Options{
    75  		OpenFilesCacheCapacity: handles,
    76  		BlockCacheCapacity:     cache / 2 * opt.MiB,
    77  		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
    78  		Filter:                 filter.NewBloomFilter(10),
    79  	})
    80  	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
    81  		db, err = leveldb.RecoverFile(file, nil)
    82  	}
    83  	// (Re)check for errors and abort if opening of the db failed
    84  	if err != nil {
    85  		return nil, err
    86  	}
    87  	return &LDBDatabase{
    88  		fn:  file,
    89  		db:  db,
    90  		log: logger,
    91  	}, nil
    92  }
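
        // exampleOpenAndGet is a minimal usage sketch: it opens a database, writes one
        // key and reads it back. The path, cache and handle values below are
        // illustrative assumptions only, not values used elsewhere in this package.
        func exampleOpenAndGet() ([]byte, error) {
        	db, err := NewLDBDatabase("/tmp/aquadb-example", 16, 16) // assumed path
        	if err != nil {
        		return nil, err
        	}
        	defer db.Close()

        	if err := db.Put([]byte("example-key"), []byte("example-value")); err != nil {
        		return nil, err
        	}
        	return db.Get([]byte("example-key"))
        }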
    93  
    94  // Path returns the path to the database directory.
    95  func (db *LDBDatabase) Path() string {
    96  	return db.fn
    97  }
    98  
    99  // Put stores the given key / value pair in the database.
   100  func (db *LDBDatabase) Put(key []byte, value []byte) error {
   101  	return db.db.Put(key, value, nil)
   102  }
   103  
        // Has reports whether the given key is present in the database.
   104  func (db *LDBDatabase) Has(key []byte) (bool, error) {
   105  	return db.db.Has(key, nil)
   106  }
   107  
   108  // Get returns the given key if it's present.
   109  func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
   110  	dat, err := db.db.Get(key, nil)
   111  	if err != nil {
   112  		return nil, err
   113  	}
   114  	return dat, nil
   115  }
   116  
   117  // Delete removes the given key from the database.
   118  func (db *LDBDatabase) Delete(key []byte) error {
   119  	return db.db.Delete(key, nil)
   120  }
   121  
        // NewIterator returns an iterator over the entire keyspace of the database.
   122  func (db *LDBDatabase) NewIterator() iterator.Iterator {
   123  	return db.db.NewIterator(nil, nil)
   124  }
   125  
   126  // NewIteratorWithPrefix returns an iterator over the subset of database content with a particular key prefix.
   127  func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
   128  	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
   129  }
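
        // examplePrefixScan is an illustrative sketch of walking all keys that share a
        // prefix via NewIteratorWithPrefix; the prefix below is an assumption chosen
        // only for demonstration. The iterator must be released when done.
        func examplePrefixScan(db *LDBDatabase) (count int, err error) {
        	it := db.NewIteratorWithPrefix([]byte("h")) // assumed prefix
        	defer it.Release()
        	for it.Next() {
        		count++ // it.Key() / it.Value() are only valid until the next call to Next
        	}
        	return count, it.Error()
        }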
   130  
        // Close stops the metrics collection goroutine and closes the underlying LevelDB instance.
   131  func (db *LDBDatabase) Close() {
   132  	// Stop the metrics collection to avoid internal database races
   133  	db.quitLock.Lock()
   134  	defer db.quitLock.Unlock()
   135  
   136  	if db.quitChan != nil {
   137  		errc := make(chan error)
   138  		db.quitChan <- errc
   139  		if err := <-errc; err != nil {
   140  			db.log.Error("Metrics collection failed", "err", err)
   141  		}
   142  		db.quitChan = nil
   143  	}
   144  	err := db.db.Close()
   145  	if err == nil {
   146  		db.log.Info("Database closed")
   147  	} else {
   148  		db.log.Error("Failed to close database", "err", err)
   149  	}
   150  }
   151  
        // LDB returns the underlying *leveldb.DB handle.
   152  func (db *LDBDatabase) LDB() *leveldb.DB {
   153  	return db.db
   154  }
   155  
   156  // Meter configures the database metrics collectors and starts the periodic stats collection goroutine.
   157  func (db *LDBDatabase) Meter(prefix string) {
   158  	if metrics.Enabled {
   159  		// Initialize all the metrics collectors at the requested prefix
   160  		db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
   161  		db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
   162  		db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
   163  		db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
   164  		db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
   165  	}
   166  	// Initialize the write delay metrics whether or not metric collection is enabled.
   167  	db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
   168  	db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
   169  
   170  	// Create a quit channel for the periodic collector and run it
   171  	db.quitLock.Lock()
   172  	db.quitChan = make(chan chan error)
   173  	db.quitLock.Unlock()
   174  
   175  	go db.meter(3 * time.Second)
   176  }
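
        // exampleMeter is a small illustrative sketch: callers typically invoke Meter once
        // right after opening the database, passing a metrics namespace that ends in a
        // slash. The prefix below is an assumption for demonstration only.
        func exampleMeter(db *LDBDatabase) {
        	db.Meter("aqua/db/chaindata/") // assumed metrics prefix
        }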
   177  
   178  // meter periodically retrieves internal leveldb counters and reports them to
   179  // the metrics subsystem.
   180  //
   181  // This is how a stats table looks (currently):
   182  //   Compactions
   183  //    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
   184  //   -------+------------+---------------+---------------+---------------+---------------
   185  //      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
   186  //      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
   187  //      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
   188  //      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
   189  //
   190  // This is how the write delay looks (currently):
   191  // DelayN:5 Delay:406.604657ms Paused: false
   192  //
   193  // This is how the iostats look (currently):
   194  // Read(MB):3895.04860 Write(MB):3654.64712
   195  func (db *LDBDatabase) meter(refresh time.Duration) {
   196  	// Create the counters to store current and previous compaction values
   197  	compactions := make([][]float64, 2)
   198  	for i := 0; i < 2; i++ {
   199  		compactions[i] = make([]float64, 3)
   200  	}
   201  	// Create storage for iostats.
   202  	var iostats [2]float64
   203  
   204  	// Create storage and warning log tracer for write delay.
   205  	var (
   206  		delaystats      [2]int64
   207  		lastWritePaused time.Time
   208  	)
   209  
   210  	var (
   211  		errc chan error
   212  		merr error
   213  	)
   214  
   215  	// Iterate ad infinitum and collect the stats
   216  	for i := 1; errc == nil && merr == nil; i++ {
   217  		// Retrieve the database stats
   218  		stats, err := db.db.GetProperty("leveldb.stats")
   219  		if err != nil {
   220  			db.log.Error("Failed to read database stats", "err", err)
   221  			merr = err
   222  			continue
   223  		}
   224  		// Find the compaction table, skip the header
   225  		lines := strings.Split(stats, "\n")
   226  		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
   227  			lines = lines[1:]
   228  		}
   229  		if len(lines) <= 3 {
   230  			db.log.Error("Compaction table not found")
   231  			merr = errors.New("compaction table not found")
   232  			continue
   233  		}
   234  		lines = lines[3:]
   235  
   236  		// Iterate over all the table rows, and accumulate the entries
   237  		for j := 0; j < len(compactions[i%2]); j++ {
   238  			compactions[i%2][j] = 0
   239  		}
   240  		for _, line := range lines {
   241  			parts := strings.Split(line, "|")
   242  			if len(parts) != 6 {
   243  				break
   244  			}
   245  			for idx, counter := range parts[3:] {
   246  				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
   247  				if err != nil {
   248  					db.log.Error("Compaction entry parsing failed", "err", err)
   249  					merr = err
   250  					continue
   251  				}
   252  				compactions[i%2][idx] += value
   253  			}
   254  		}
   255  		// Update all the requested meters
   256  		if db.compTimeMeter != nil {
   257  			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
   258  		}
   259  		if db.compReadMeter != nil {
   260  			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
   261  		}
   262  		if db.compWriteMeter != nil {
   263  			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
   264  		}
   265  
   266  		// Retrieve the write delay statistic
   267  		writedelay, err := db.db.GetProperty("leveldb.writedelay")
   268  		if err != nil {
   269  			db.log.Error("Failed to read database write delay statistic", "err", err)
   270  			merr = err
   271  			continue
   272  		}
   273  		var (
   274  			delayN        int64
   275  			delayDuration string
   276  			duration      time.Duration
   277  			paused        bool
   278  		)
   279  		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
   280  			db.log.Error("Write delay statistic not found")
   281  			merr = err
   282  			continue
   283  		}
   284  		duration, err = time.ParseDuration(delayDuration)
   285  		if err != nil {
   286  			db.log.Error("Failed to parse delay duration", "err", err)
   287  			merr = err
   288  			continue
   289  		}
   290  		if db.writeDelayNMeter != nil {
   291  			db.writeDelayNMeter.Mark(delayN - delaystats[0])
   292  		}
   293  		if db.writeDelayMeter != nil {
   294  			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
   295  		}
   296  		// If a warning that the database is performing compaction has been displayed, any
   297  		// subsequent warnings are withheld for one minute so as not to overwhelm the user.
   298  		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
   299  			time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
   300  			db.log.Warn("Database compacting, degraded performance")
   301  			lastWritePaused = time.Now()
   302  		}
   303  		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
   304  
   305  		// Retrieve the database iostats.
   306  		ioStats, err := db.db.GetProperty("leveldb.iostats")
   307  		if err != nil {
   308  			db.log.Error("Failed to read database iostats", "err", err)
   309  			merr = err
   310  			continue
   311  		}
   312  		var nRead, nWrite float64
   313  		parts := strings.Split(ioStats, " ")
   314  		if len(parts) < 2 {
   315  			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
   316  			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
   317  			continue
   318  		}
   319  		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
   320  			db.log.Error("Bad syntax of read entry", "entry", parts[0])
   321  			merr = err
   322  			continue
   323  		}
   324  		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
   325  			db.log.Error("Bad syntax of write entry", "entry", parts[1])
   326  			merr = err
   327  			continue
   328  		}
   329  		if db.diskReadMeter != nil {
   330  			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
   331  		}
   332  		if db.diskWriteMeter != nil {
   333  			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
   334  		}
   335  		iostats[0], iostats[1] = nRead, nWrite
   336  
   337  		// Sleep a bit, then repeat the stats collection
   338  		select {
   339  		case errc = <-db.quitChan:
   340  			// Quit requested, stop hammering the database
   341  		case <-time.After(refresh):
   342  			// Timeout, gather a new set of stats
   343  		}
   344  	}
   345  
   346  	if errc == nil {
   347  		errc = <-db.quitChan
   348  	}
   349  	errc <- merr
   350  }
   351  
        // NewBatch returns a new write batch backed by the underlying LevelDB instance.
   352  func (db *LDBDatabase) NewBatch() Batch {
   353  	return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
   354  }
   355  
   356  type ldbBatch struct {
   357  	db   *leveldb.DB
   358  	b    *leveldb.Batch
   359  	size int
   360  }
   361  
   362  func (b *ldbBatch) Put(key, value []byte) error {
   363  	b.b.Put(key, value)
   364  	b.size += len(value)
   365  	return nil
   366  }
   367  
   368  func (b *ldbBatch) Delete(key []byte) error {
   369  	b.b.Delete(key)
   370  	b.size++
   371  	return nil
   372  }
   373  
   374  func (b *ldbBatch) Write() error {
   375  	return b.db.Write(b.b, nil)
   376  }
   377  
   378  func (b *ldbBatch) ValueSize() int {
   379  	return b.size
   380  }
   381  
   382  func (b *ldbBatch) Reset() {
   383  	b.b.Reset()
   384  	b.size = 0
   385  }
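
        // exampleBatchWrite is an illustrative sketch of the intended batch workflow:
        // queue several writes, flush them atomically with Write, then Reset the batch
        // for reuse. The keys and values below are assumptions for demonstration only.
        func exampleBatchWrite(db *LDBDatabase) error {
        	batch := db.NewBatch()
        	if err := batch.Put([]byte("k1"), []byte("v1")); err != nil {
        		return err
        	}
        	if err := batch.Put([]byte("k2"), []byte("v2")); err != nil {
        		return err
        	}
        	if err := batch.Write(); err != nil {
        		return err
        	}
        	batch.Reset() // the batch can now be reused for further writes
        	return nil
        }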
   386  
   387  type table struct {
   388  	db     Database
   389  	prefix string
   390  }
   391  
   392  // NewTable returns a Database object that prefixes all keys with a given
   393  // string.
   394  func NewTable(db Database, prefix string) Database {
   395  	return &table{
   396  		db:     db,
   397  		prefix: prefix,
   398  	}
   399  }
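
        // exampleTable is an illustrative sketch of the table wrapper: a key written
        // through the table is stored under prefix+key in the backing database. The
        // prefix and key below are assumptions for demonstration only.
        func exampleTable(db Database) error {
        	t := NewTable(db, "example-") // assumed prefix
        	if err := t.Put([]byte("0x01"), []byte("data")); err != nil {
        		return err
        	}
        	// The backing database now holds the key "example-0x01".
        	_, err := db.Get([]byte("example-0x01"))
        	return err
        }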
   400  
   401  func (dt *table) Put(key []byte, value []byte) error {
   402  	return dt.db.Put(append([]byte(dt.prefix), key...), value)
   403  }
   404  
   405  func (dt *table) Has(key []byte) (bool, error) {
   406  	return dt.db.Has(append([]byte(dt.prefix), key...))
   407  }
   408  
   409  func (dt *table) Get(key []byte) ([]byte, error) {
   410  	return dt.db.Get(append([]byte(dt.prefix), key...))
   411  }
   412  
   413  func (dt *table) Delete(key []byte) error {
   414  	return dt.db.Delete(append([]byte(dt.prefix), key...))
   415  }
   416  
   417  func (dt *table) Close() {
   418  	// Do nothing; don't close the underlying DB.
   419  }
   420  
   421  type tableBatch struct {
   422  	batch  Batch
   423  	prefix string
   424  }
   425  
   426  // NewTableBatch returns a Batch object which prefixes all keys with a given string.
   427  func NewTableBatch(db Database, prefix string) Batch {
   428  	return &tableBatch{db.NewBatch(), prefix}
   429  }
   430  
   431  func (dt *table) NewBatch() Batch {
   432  	return &tableBatch{dt.db.NewBatch(), dt.prefix}
   433  }
   434  
   435  func (tb *tableBatch) Put(key, value []byte) error {
   436  	return tb.batch.Put(append([]byte(tb.prefix), key...), value)
   437  }
   438  
   439  func (tb *tableBatch) Delete(key []byte) error {
   440  	return tb.batch.Delete(append([]byte(tb.prefix), key...))
   441  }
   442  
   443  func (tb *tableBatch) Write() error {
   444  	return tb.batch.Write()
   445  }
   446  
   447  func (tb *tableBatch) ValueSize() int {
   448  	return tb.batch.ValueSize()
   449  }
   450  
   451  func (tb *tableBatch) Reset() {
   452  	tb.batch.Reset()
   453  }