github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/qctdb/database.go

package qctdb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/quickchainproject/quickchain/log"
	"github.com/quickchainproject/quickchain/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	writeDelayNThreshold       = 200                    // Write delay count (per second, over the last minute) above which a warning is logged
	writeDelayThreshold        = 350 * time.Millisecond // Write delay duration (per second, over the last minute) above which a warning is logged
	writeDelayWarningThrottler = 1 * time.Minute        // Minimum pause between two consecutive write delay warnings
)

// OpenFileLimit is the default limit on open database files.
var OpenFileLimit = 64
type LDBDatabase struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// NewLDBDatabase returns a wrapped LevelDB object. The cache size (in MB) and
// the number of file handles are clamped to sane minimums.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
	logger := log.New("database", file)

	// Ensure we have some minimal caching and file guarantees
	if cache < 16 {
		cache = 16
	}
	if handles < 16 {
		handles = 16
	}
	logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &LDBDatabase{
		fn:  file,
		db:  db,
		log: logger,
	}, nil
}
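
// exampleOpenAndUse is a hedged usage sketch, not part of the original API:
// it shows how a caller might open a database, store a value, read it back
// and shut down cleanly. The path "chaindata" and the key/value bytes are
// hypothetical.
func exampleOpenAndUse() error {
	db, err := NewLDBDatabase("chaindata", 16, 16) // 16 MB cache, 16 handles (the clamped minimums)
	if err != nil {
		return err
	}
	defer db.Close()

	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		return err
	}
	val, err := db.Get([]byte("key"))
	if err != nil {
		return err
	}
	fmt.Printf("read back %q\n", val)
	return nil
}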

// Path returns the path to the database directory.
func (db *LDBDatabase) Path() string {
	return db.fn
}

// Put inserts the given key/value pair into the database.
func (db *LDBDatabase) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Has reports whether the given key is present in the database.
func (db *LDBDatabase) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get returns the value for the given key if it's present.
func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Delete removes the given key from the database.
func (db *LDBDatabase) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

// NewIterator returns an iterator over the entire database content.
func (db *LDBDatabase) NewIterator() iterator.Iterator {
	return db.db.NewIterator(nil, nil)
}

// NewIteratorWithPrefix returns an iterator over the subset of database
// content with a particular key prefix.
func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}
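
// examplePrefixScan is a minimal sketch, assuming hypothetical "acct-"
// prefixed keys, of how a caller might walk a keyspace slice with
// NewIteratorWithPrefix. Releasing the iterator and checking its accumulated
// error are both required by the goleveldb iterator contract.
func examplePrefixScan(db *LDBDatabase) error {
	it := db.NewIteratorWithPrefix([]byte("acct-"))
	defer it.Release() // always release the iterator's underlying resources

	for it.Next() {
		// Key/Value are only valid until the next call to Next; copy if kept.
		fmt.Printf("%s => %x\n", it.Key(), it.Value())
	}
	return it.Error() // surfaces any I/O error hit during iteration
}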

// Close stops the metrics collector (if running) and closes the underlying
// LevelDB instance.
func (db *LDBDatabase) Close() {
	// Stop the metrics collection to avoid internal database races
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
	}
	err := db.db.Close()
	if err == nil {
		db.log.Info("Database closed")
	} else {
		db.log.Error("Failed to close database", "err", err)
	}
}

// LDB exposes the raw LevelDB handle for callers that need functionality not
// covered by the wrapper.
func (db *LDBDatabase) LDB() *leveldb.DB {
	return db.db
}
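
// exampleRawStats is a hedged sketch of using the raw handle: reading a
// LevelDB property string directly. GetProperty is part of the goleveldb API;
// "leveldb.stats" is one of its built-in property names, as also used by the
// meter loop below.
func exampleRawStats(db *LDBDatabase) (string, error) {
	return db.LDB().GetProperty("leveldb.stats")
}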

// Meter configures the database metrics collectors and starts the periodic
// collection goroutine.
func (db *LDBDatabase) Meter(prefix string) {
	if metrics.Enabled {
		// Initialize all the metrics collectors at the requested prefix
		db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
		db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
		db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
		db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
		db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
	}
	// Initialize the write delay metrics whether or not metrics collection is enabled.
	db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
	db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)

	// Create a quit channel for the periodic collector and run it
	db.quitLock.Lock()
	db.quitChan = make(chan chan error)
	db.quitLock.Unlock()

	go db.meter(3 * time.Second)
}
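
// exampleMetering is a minimal usage sketch, assuming the hypothetical metrics
// namespace "qct/db/chaindata/": open the database and start metering. The
// caller's eventual db.Close() drains the collector via the quit channel
// handshake before releasing LevelDB.
func exampleMetering() (*LDBDatabase, error) {
	db, err := NewLDBDatabase("chaindata", 768, 1024)
	if err != nil {
		return nil, err
	}
	db.Meter("qct/db/chaindata/") // spawns the 3-second collection loop
	return db, nil
}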

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what the stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
// DelayN:5 Delay:406.604657ms
//
// This is what the iostats look like (currently):
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *LDBDatabase) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 3)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWriteDelay  time.Time
		lastWriteDelayN time.Time
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; ; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			return
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			return
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					return
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}

		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			return
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s", &delayN, &delayDuration); n != 2 || err != nil {
			db.log.Error("Write delay statistic not found")
			return
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			return
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
			// If the number of write delays collected in the last minute exceeds
			// the predefined threshold, log a warning. Once such a warning has
			// been shown, subsequent ones are withheld for one minute so they
			// don't overwhelm the user.
			if int(db.writeDelayNMeter.Rate1()) > writeDelayNThreshold &&
				time.Now().After(lastWriteDelayN.Add(writeDelayWarningThrottler)) {
				db.log.Warn("Write delay number exceeds the threshold (200 per second) in the last minute")
				lastWriteDelayN = time.Now()
			}
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
			// If the write delay duration collected in the last minute exceeds
			// the predefined threshold, log a warning. Once such a warning has
			// been shown, subsequent ones are withheld for one minute so they
			// don't overwhelm the user.
			if int64(db.writeDelayMeter.Rate1()) > writeDelayThreshold.Nanoseconds() &&
				time.Now().After(lastWriteDelay.Add(writeDelayWarningThrottler)) {
				db.log.Warn("Write delay duration exceeds the threshold (35% of the time) in the last minute")
				lastWriteDelay = time.Now()
			}
		}
		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			return
		}
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			return
		}
		r := strings.Split(parts[0], ":")
		if len(r) < 2 {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			return
		}
		read, err := strconv.ParseFloat(r[1], 64)
		if err != nil {
			db.log.Error("Read entry parsing failed", "err", err)
			return
		}
		w := strings.Split(parts[1], ":")
		if len(w) < 2 {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			return
		}
		write, err := strconv.ParseFloat(w[1], 64)
		if err != nil {
			db.log.Error("Write entry parsing failed", "err", err)
			return
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((read - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((write - iostats[1]) * 1024 * 1024))
		}
		iostats[0] = read
		iostats[1] = write

		// Sleep a bit, then repeat the stats collection
		select {
		case errc := <-db.quitChan:
			// Quit requested, stop hammering the database
			errc <- nil
			return

		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}
}
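
// parseWriteDelay is a hedged, self-contained sketch of the
// "leveldb.writedelay" parse performed above; the property format is e.g.
// "DelayN:5 Delay:406.604657ms", as shown in the meter doc comment. This
// helper is illustrative only and not used by the collector.
func parseWriteDelay(stat string) (int64, time.Duration, error) {
	var (
		delayN   int64
		delayStr string
	)
	if n, err := fmt.Sscanf(stat, "DelayN:%d Delay:%s", &delayN, &delayStr); n != 2 || err != nil {
		return 0, 0, fmt.Errorf("unexpected writedelay format %q: %v", stat, err)
	}
	duration, err := time.ParseDuration(delayStr)
	return delayN, duration, err
}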

// NewBatch returns a write batch that accumulates changes in memory until
// Write is called.
func (db *LDBDatabase) NewBatch() Batch {
	return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}

type ldbBatch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put queues a key/value insertion into the batch and tracks the value size.
func (b *ldbBatch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Write flushes the accumulated batch to the database atomically.
func (b *ldbBatch) Write() error {
	return b.db.Write(b.b, nil)
}

// ValueSize returns the total number of value bytes queued in the batch.
func (b *ldbBatch) ValueSize() int {
	return b.size
}

// Reset clears the batch for reuse.
func (b *ldbBatch) Reset() {
	b.b.Reset()
	b.size = 0
}
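
// exampleBatchedWrites is a minimal sketch of batch usage, assuming a
// hypothetical 100 KB flush threshold: queue writes, flush when ValueSize
// grows past the limit, and reset the batch for reuse.
func exampleBatchedWrites(db *LDBDatabase, items map[string][]byte) error {
	const flushThreshold = 100 * 1024 // hypothetical limit, in value bytes

	batch := db.NewBatch()
	for key, value := range items {
		if err := batch.Put([]byte(key), value); err != nil {
			return err
		}
		if batch.ValueSize() >= flushThreshold {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	return batch.Write() // flush the remainder
}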

// table wraps a Database, transparently prefixing every key with a constant
// string.
type table struct {
	db     Database
	prefix string
}

// NewTable returns a Database object that prefixes all keys with a given
// string.
func NewTable(db Database, prefix string) Database {
	return &table{
		db:     db,
		prefix: prefix,
	}
}

func (dt *table) Put(key []byte, value []byte) error {
	return dt.db.Put(append([]byte(dt.prefix), key...), value)
}

func (dt *table) Has(key []byte) (bool, error) {
	return dt.db.Has(append([]byte(dt.prefix), key...))
}

func (dt *table) Get(key []byte) ([]byte, error) {
	return dt.db.Get(append([]byte(dt.prefix), key...))
}

func (dt *table) Delete(key []byte) error {
	return dt.db.Delete(append([]byte(dt.prefix), key...))
}

func (dt *table) Close() {
	// Do nothing; don't close the underlying DB.
}
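
// exampleTables is a hedged sketch of carving namespaces out of one database
// with NewTable; the "headers-" and "bodies-" prefixes are hypothetical. Both
// tables share the same underlying store, so their keys only collide if the
// prefixed forms do.
func exampleTables(db Database) error {
	headers := NewTable(db, "headers-")
	bodies := NewTable(db, "bodies-")

	if err := headers.Put([]byte("1"), []byte("header one")); err != nil {
		return err
	}
	// Stored under the raw key "bodies-1", distinct from "headers-1".
	return bodies.Put([]byte("1"), []byte("body one"))
}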

// tableBatch wraps a Batch, prefixing every key with a constant string.
type tableBatch struct {
	batch  Batch
	prefix string
}

// NewTableBatch returns a Batch object which prefixes all keys with a given string.
func NewTableBatch(db Database, prefix string) Batch {
	return &tableBatch{db.NewBatch(), prefix}
}

// NewBatch returns a batch whose keys are prefixed with the table's prefix.
func (dt *table) NewBatch() Batch {
	return &tableBatch{dt.db.NewBatch(), dt.prefix}
}

func (tb *tableBatch) Put(key, value []byte) error {
	return tb.batch.Put(append([]byte(tb.prefix), key...), value)
}

func (tb *tableBatch) Write() error {
	return tb.batch.Write()
}

func (tb *tableBatch) ValueSize() int {
	return tb.batch.ValueSize()
}

func (tb *tableBatch) Reset() {
	tb.batch.Reset()
}
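
// exampleTableBatch is a short hedged sketch combining tables and batches:
// batched writes into a prefixed namespace. The "receipts-" prefix is
// hypothetical.
func exampleTableBatch(db Database) error {
	batch := NewTableBatch(db, "receipts-")
	if err := batch.Put([]byte("1"), []byte("receipt one")); err != nil {
		return err
	}
	return batch.Write() // keys land in the store as "receipts-1", etc.
}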