github.com/annchain/OG@v0.0.9/ogdb/leveldb.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ogdb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/annchain/OG/arefactor/common/goroutine"
	"github.com/annchain/OG/metrics"
	log "github.com/sirupsen/logrus"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	// writeDelayNThreshold is the threshold on the one-minute moving average
	// rate of write delays (events per second) beyond which a warning is logged.
	writeDelayNThreshold = 200
	// writeDelayThreshold is the threshold on the one-minute moving average of
	// delayed time per second (350ms/s, roughly 35% of the time) beyond which
	// a warning is logged.
	writeDelayThreshold = 350 * time.Millisecond
	// writeDelayWarningThrottler is the minimum interval between two write
	// delay warnings.
	writeDelayWarningThrottler = 1 * time.Minute
)

var OpenFileLimit = 64

// LevelDB is a persistent key-value store backed by goleveldb.
type LevelDB struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
}

// LevelDBConfig groups the parameters needed to open a LevelDB instance.
type LevelDBConfig struct {
	Path    string
	Cache   int
	Handles int
}

// NewLevelDB returns a wrapped LevelDB object.
func NewLevelDB(file string, cache int, handles int) (*LevelDB, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < 16 {
		cache = 16
	}
	if handles < 16 {
		handles = 16
	}
	log.WithFields(log.Fields{"cache": cache, "handles": handles}).Info("Allocated cache and file handles")

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		log.WithError(err).Warning("recovering")
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		log.WithError(err).Warning("create db error")
		return nil, err
	}
	return &LevelDB{
		fn: file,
		db: db,
	}, nil
}
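
// A minimal usage sketch; the path and sizing below are illustrative only:
//
//	db, err := NewLevelDB("/tmp/ogdb-example", 16, 16)
//	if err != nil {
//		log.WithError(err).Fatal("failed to open database")
//	}
//	defer db.Close()
//	if err := db.Put([]byte("key"), []byte("value")); err != nil {
//		log.WithError(err).Error("put failed")
//	}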

// Path returns the path to the database directory.
func (db *LevelDB) Path() string {
	return db.fn
}

// Put inserts the given key/value pair into the database.
func (db *LevelDB) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Has reports whether the given key is present in the database.
func (db *LevelDB) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get returns the value for the given key if it's present.
func (db *LevelDB) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Delete removes the given key from the database.
func (db *LevelDB) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

// NewIterator returns an iterator over the whole keyspace of the database.
func (db *LevelDB) NewIterator() iterator.Iterator {
	return db.db.NewIterator(nil, nil)
}

// NewIteratorWithPrefix returns an iterator to iterate over the subset of
// database content with a particular key prefix.
func (db *LevelDB) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}
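
// Iteration sketch; the prefix is illustrative and process is a hypothetical
// handler. Per the goleveldb contract the iterator must be released when done:
//
//	it := db.NewIteratorWithPrefix([]byte("tx-"))
//	for it.Next() {
//		process(it.Key(), it.Value()) // slices are only valid until the next call to Next
//	}
//	it.Release()
//	if err := it.Error(); err != nil {
//		log.WithError(err).Error("iteration failed")
//	}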

// Close stops the metrics collector and closes the underlying database.
func (db *LevelDB) Close() {
	// Stop the metrics collection to avoid internal database races
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			log.WithError(err).Error("Metrics collection failed")
		}
		db.quitChan = nil
	}
	err := db.db.Close()
	if err == nil {
		log.Info("Database closed")
	} else {
		log.WithError(err).Error("Failed to close database")
	}
}

// LDB returns the underlying goleveldb database handle.
func (db *LevelDB) LDB() *leveldb.DB {
	return db.db
}

// Meter configures the database metrics collectors and launches the periodic
// collection goroutine.
func (db *LevelDB) Meter(prefix string) {
	if metrics.Enabled {
		// Initialize all the metrics collectors at the requested prefix
		db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
		db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
		db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
		db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
		db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
	}
	// Initialize the write delay metrics regardless of whether metrics
	// collection is enabled.
	db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
	db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)

	// Create a quit channel for the periodic collector and run it
	db.quitLock.Lock()
	db.quitChan = make(chan chan error)
	db.quitLock.Unlock()

	goroutine.New(func() {
		db.meter(3 * time.Second)
	})
}
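
// Metering sketch; the prefix is an illustrative metric namespace. Meter
// spawns a goroutine that polls leveldb's counters every 3 seconds, and
// Close later shuts it down via quitChan:
//
//	db.Meter("ogdb/chaindata/")
//	// ... use the database ...
//	db.Close()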

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
// DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats look like (currently):
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *LevelDB) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 3)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWriteDelay  time.Time
		lastWriteDelayN time.Time
		lastWritePaused time.Time
	)

	var (
		errc chan error
		merr error
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			log.WithError(err).Error("Failed to read database stats")
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			log.Error("Compaction table not found")
			merr = errors.New("compaction table not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					log.WithError(err).Error("Compaction entry parsing failed")
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}

		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			log.WithError(err).Error("Failed to read database write delay statistic")
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			log.Error("Write delay statistic not found")
			merr = err
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			log.WithError(err).Error("Failed to parse delay duration")
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
			// If the number of write delays collected in the last minute exceeds
			// the predefined threshold, print a warning. Once such a warning has
			// been logged, subsequent ones are withheld for one minute so as not
			// to overwhelm the user.
			if int(db.writeDelayNMeter.Rate1()) > writeDelayNThreshold &&
				time.Now().After(lastWriteDelayN.Add(writeDelayWarningThrottler)) {
				log.Warn("Write delay number exceeds the threshold (200 per second) in the last minute")
				lastWriteDelayN = time.Now()
			}
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
			// If the write delay duration collected in the last minute exceeds
			// the predefined threshold, print a warning. Once such a warning has
			// been logged, subsequent ones are withheld for one minute so as not
			// to overwhelm the user.
			if int64(db.writeDelayMeter.Rate1()) > writeDelayThreshold.Nanoseconds() &&
				time.Now().After(lastWriteDelay.Add(writeDelayWarningThrottler)) {
				log.Warn("Write delay duration exceeds the threshold (35% of the time) in the last minute")
				lastWriteDelay = time.Now()
			}
		}
		// If a warning that the db is performing compaction has been displayed,
		// any subsequent warnings will be withheld for one minute so as not to
		// overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(writeDelayWarningThrottler)) {
			log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}

		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			log.WithError(err).Error("Failed to read database iostats")
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			log.WithField("ioStats", ioStats).Error("Bad syntax of ioStats")
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			log.WithField("entry", parts[0]).Error("Bad syntax of read entry")
			merr = err
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			log.WithField("entry", parts[1]).Error("Bad syntax of write entry")
			merr = err
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requested, stop hammering the database
		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}

// NewBatch returns a write batch that buffers mutations in memory until
// Write is called.
func (db *LevelDB) NewBatch() Batch {
	return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}

// ldbBatch wraps a leveldb.Batch and tracks the amount of queued value data.
type ldbBatch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put queues a key/value insertion into the batch.
func (b *ldbBatch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Write flushes the queued mutations to the underlying database.
func (b *ldbBatch) Write() error {
	return b.db.Write(b.b, nil)
}

// ValueSize returns the amount of value data queued in the batch.
func (b *ldbBatch) ValueSize() int {
	return b.size
}

// Reset empties the batch for reuse.
func (b *ldbBatch) Reset() {
	b.b.Reset()
	b.size = 0
}
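
// Batch usage sketch: mutations are buffered in memory and only hit the
// database on Write; the keys and values are illustrative:
//
//	batch := db.NewBatch()
//	_ = batch.Put([]byte("a"), []byte("1"))
//	_ = batch.Put([]byte("b"), []byte("2"))
//	if err := batch.Write(); err != nil {
//		log.WithError(err).Error("batch write failed")
//	}
//	batch.Reset() // reuse the batch for further writes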

// table wraps a Database and transparently prefixes every key with a fixed
// string.
type table struct {
	db     Database
	prefix string
}

// NewTable returns a Database object that prefixes all keys with a given
// string.
func NewTable(db Database, prefix string) Database {
	return &table{
		db:     db,
		prefix: prefix,
	}
}
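
// Prefixing sketch: a table stores each entry under prefix+key, so two
// tables over the same Database never collide; the "user-" prefix is
// illustrative:
//
//	users := NewTable(db, "user-")
//	_ = users.Put([]byte("alice"), []byte("...")) // stored under "user-alice"
//	val, _ := users.Get([]byte("alice"))          // reads "user-alice"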

func (dt *table) Put(key []byte, value []byte) error {
	return dt.db.Put(append([]byte(dt.prefix), key...), value)
}

func (dt *table) Has(key []byte) (bool, error) {
	return dt.db.Has(append([]byte(dt.prefix), key...))
}

func (dt *table) Get(key []byte) ([]byte, error) {
	return dt.db.Get(append([]byte(dt.prefix), key...))
}

func (dt *table) Delete(key []byte) error {
	return dt.db.Delete(append([]byte(dt.prefix), key...))
}

func (dt *table) Close() {
	// Do nothing; don't close the underlying DB.
}

// tableBatch wraps a Batch and prefixes every key put into it.
type tableBatch struct {
	batch  Batch
	prefix string
}

// NewTableBatch returns a Batch object which prefixes all keys with a given string.
func NewTableBatch(db Database, prefix string) Batch {
	return &tableBatch{db.NewBatch(), prefix}
}

func (dt *table) NewBatch() Batch {
	return &tableBatch{dt.db.NewBatch(), dt.prefix}
}

func (tb *tableBatch) Put(key, value []byte) error {
	return tb.batch.Put(append([]byte(tb.prefix), key...), value)
}

func (tb *tableBatch) Write() error {
	return tb.batch.Write()
}

func (tb *tableBatch) ValueSize() int {
	return tb.batch.ValueSize()
}

func (tb *tableBatch) Reset() {
	tb.batch.Reset()
}
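
// The two wrappers compose: a tableBatch prepends the prefix on Put and
// delegates everything else to the underlying Batch; names are illustrative:
//
//	tb := NewTableBatch(db, "user-")
//	_ = tb.Put([]byte("bob"), []byte("...")) // queued under "user-bob"
//	if err := tb.Write(); err != nil {
//		log.WithError(err).Error("table batch write failed")
//	}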