github.com/turingchain2020/turingchain@v1.1.21/common/db/go_level_db.go (about)

     1  // Copyright Turing Corp. 2018 All Rights Reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package db
     6  
     7  import (
     8  	"bytes"
     9  	"fmt"
    10  	"path"
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	log "github.com/turingchain2020/turingchain/common/log/log15"
    16  	"github.com/turingchain2020/turingchain/types"
    17  	metrics "github.com/rcrowley/go-metrics"
    18  	"github.com/syndtr/goleveldb/leveldb"
    19  	"github.com/syndtr/goleveldb/leveldb/errors"
    20  	"github.com/syndtr/goleveldb/leveldb/filter"
    21  	"github.com/syndtr/goleveldb/leveldb/iterator"
    22  	"github.com/syndtr/goleveldb/leveldb/opt"
    23  	"github.com/syndtr/goleveldb/leveldb/util"
    24  )
    25  
    26  var llog = log.New("module", "db.goleveldb")
    27  
    28  const (
    29  	// degradationWarnInterval specifies how often warning should be printed if the
    30  	// leveldb database cannot keep up with requested writes.
    31  	degradationWarnInterval = time.Minute
    32  
    33  	// metricsGatheringInterval specifies the interval to retrieve leveldb database
    34  	// compaction, io and pause stats to report to the user.
    35  	metricsGatheringInterval = 3 * time.Second
    36  )
    37  
    38  func init() {
    39  	dbCreator := func(name string, dir string, cache int) (DB, error) {
    40  		return NewGoLevelDB(name, dir, cache)
    41  	}
    42  	registerDBCreator(levelDBBackendStr, dbCreator, false)
    43  	registerDBCreator(goLevelDBBackendStr, dbCreator, false)
    44  }
    45  
// GoLevelDB wraps a goleveldb handle together with the go-metrics
// meters/gauges that the background meter goroutine keeps updated.
type GoLevelDB struct {
	BaseDB
	db *leveldb.DB

	compTimeMeter      metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter      metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter     metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter   metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter    metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskSizeGauge      metrics.Gauge // Gauge for tracking the size of all the levels in the database
	diskReadMeter      metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter     metrics.Meter // Meter for measuring the effective amount of data written
	memCompGauge       metrics.Gauge // Gauge for tracking the number of memory compaction
	level0CompGauge    metrics.Gauge // Gauge for tracking the number of table compaction in level0
	nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level
	seekCompGauge      metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt

	// quitChan hands the meter goroutine a reply channel during Close;
	// the goroutine answers with its terminal error (if any).
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
}
    66  
    67  //NewGoLevelDB new
    68  func NewGoLevelDB(name string, dir string, cache int) (*GoLevelDB, error) {
    69  	dbPath := path.Join(dir, name+".db")
    70  	if cache == 0 {
    71  		cache = 64
    72  	}
    73  	handles := cache
    74  	if handles < 16 {
    75  		handles = 16
    76  	}
    77  	if cache < 4 {
    78  		cache = 4
    79  	}
    80  	// Open the db and recover any potential corruptions
    81  	db, err := leveldb.OpenFile(dbPath, &opt.Options{
    82  		OpenFilesCacheCapacity: handles,
    83  		BlockCacheCapacity:     cache / 2 * opt.MiB,
    84  		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
    85  		Filter:                 filter.NewBloomFilter(10),
    86  	})
    87  	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
    88  		db, err = leveldb.RecoverFile(dbPath, nil)
    89  	}
    90  	if err != nil {
    91  		return nil, err
    92  	}
    93  	database := &GoLevelDB{
    94  		db:       db,
    95  		quitChan: make(chan chan error),
    96  	}
    97  
    98  	namespace := ""
    99  	database.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
   100  	database.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
   101  	database.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
   102  	database.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
   103  	database.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
   104  	database.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
   105  	database.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
   106  	database.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
   107  	database.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
   108  	database.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
   109  	database.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
   110  	database.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)
   111  
   112  	// Start up the metrics gathering and return
   113  	go database.meter(metricsGatheringInterval)
   114  
   115  	return database, nil
   116  }
   117  
   118  //Get get
   119  func (db *GoLevelDB) Get(key []byte) ([]byte, error) {
   120  	res, err := db.db.Get(key, nil)
   121  	if err != nil {
   122  		if err == errors.ErrNotFound {
   123  			return nil, ErrNotFoundInDb
   124  		}
   125  		llog.Error("Get", "error", err)
   126  		return nil, err
   127  
   128  	}
   129  	return res, nil
   130  }
   131  
   132  //Set set
   133  func (db *GoLevelDB) Set(key []byte, value []byte) error {
   134  	err := db.db.Put(key, value, nil)
   135  	if err != nil {
   136  		llog.Error("Set", "error", err)
   137  		return err
   138  	}
   139  	return nil
   140  }
   141  
   142  //SetSync 同步
   143  func (db *GoLevelDB) SetSync(key []byte, value []byte) error {
   144  	err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
   145  	if err != nil {
   146  		llog.Error("SetSync", "error", err)
   147  		return err
   148  	}
   149  	return nil
   150  }
   151  
   152  //Delete 删除
   153  func (db *GoLevelDB) Delete(key []byte) error {
   154  	err := db.db.Delete(key, nil)
   155  	if err != nil {
   156  		llog.Error("Delete", "error", err)
   157  		return err
   158  	}
   159  	return nil
   160  }
   161  
   162  //DeleteSync 删除同步
   163  func (db *GoLevelDB) DeleteSync(key []byte) error {
   164  	err := db.db.Delete(key, &opt.WriteOptions{Sync: true})
   165  	if err != nil {
   166  		llog.Error("DeleteSync", "error", err)
   167  		return err
   168  	}
   169  	return nil
   170  }
   171  
// DB exposes the raw *leveldb.DB handle for callers that need
// features not covered by the DB interface.
func (db *GoLevelDB) DB() *leveldb.DB {
	return db.db
}
   176  
   177  //Close 关闭
   178  func (db *GoLevelDB) Close() {
   179  	if db.quitChan != nil {
   180  		errc := make(chan error)
   181  		db.quitChan <- errc
   182  		if err := <-errc; err != nil {
   183  			llog.Error("Metrics collection failed", "err", err)
   184  		}
   185  		db.quitChan = nil
   186  	}
   187  
   188  	err := db.db.Close()
   189  	if err != nil {
   190  		llog.Error("Close", "error", err)
   191  	}
   192  }
   193  
   194  //Print 打印
   195  func (db *GoLevelDB) Print() {
   196  	str, err := db.db.GetProperty("leveldb.stats")
   197  	if err != nil {
   198  		return
   199  	}
   200  	llog.Info("Print", "stats", str)
   201  
   202  	iter := db.db.NewIterator(nil, nil)
   203  	for iter.Next() {
   204  		key := iter.Key()
   205  		value := iter.Value()
   206  		//fmt.Printf("[%X]:\t[%X]\n", key, value)
   207  		llog.Info("Print", "key", string(key), "value", string(value))
   208  	}
   209  }
   210  
   211  //Stats ...
   212  func (db *GoLevelDB) Stats() map[string]string {
   213  	keys := []string{
   214  		"leveldb.num-files-at-level{n}",
   215  		"leveldb.stats",
   216  		"leveldb.sstables",
   217  		"leveldb.blockpool",
   218  		"leveldb.cachedblock",
   219  		"leveldb.openedtables",
   220  		"leveldb.alivesnaps",
   221  		"leveldb.aliveiters",
   222  	}
   223  
   224  	stats := make(map[string]string)
   225  	for _, key := range keys {
   226  		str, err := db.db.GetProperty(key)
   227  		if err == nil {
   228  			stats[key] = str
   229  		}
   230  	}
   231  	return stats
   232  }
   233  
   234  //Iterator 迭代器
   235  func (db *GoLevelDB) Iterator(start []byte, end []byte, reverse bool) Iterator {
   236  	if end == nil {
   237  		end = bytesPrefix(start)
   238  	}
   239  	if bytes.Equal(end, types.EmptyValue) {
   240  		end = nil
   241  	}
   242  	r := &util.Range{Start: start, Limit: end}
   243  	it := db.db.NewIterator(r, nil)
   244  	return &goLevelDBIt{it, itBase{start, end, reverse}}
   245  }
   246  
// BeginTx opens a native goleveldb transaction and wraps it as a TxKV.
// Note: goleveldb allows only one open transaction at a time; subsequent
// calls block until the first is committed or discarded.
func (db *GoLevelDB) BeginTx() (TxKV, error) {
	tx, err := db.db.OpenTransaction()
	if err != nil {
		return nil, err
	}
	return &goLevelDBTx{tx: tx}, nil
}
   255  
   256  // CompactRange ...
   257  func (db *GoLevelDB) CompactRange(start, limit []byte) error {
   258  	r := util.Range{Start: start, Limit: limit}
   259  	return db.db.CompactRange(r)
   260  }
   261  
// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// It loops every refresh interval until either a stats-parsing error occurs
// or Close sends a reply channel over quitChan; in both cases it answers the
// (eventual) quit request with the terminal error before returning.
//
// This is how a LevelDB stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is how the write delay look like (currently):
// DelayN:5 Delay:406.604657ms Paused: false
//
// This is how the iostats look like (currently):
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *GoLevelDB) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values.
	// Two rows are kept (current i%2, previous (i-1)%2) so each tick can
	// report deltas; columns are Size, Time, Read, Write.
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 4)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWritePaused time.Time
	)

	var (
		errc chan error // reply channel received from Close, nil until quit requested
		merr error      // first fatal stats error; non-nil ends the loop
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			llog.Error("Failed to read database stats", "err", err)
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			llog.Error("Compaction leveldbTable not found")
			merr = errors.New("compaction leveldbTable not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the leveldbTable rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			// parts[2:] are the Size/Time/Read/Write columns; parts[0-1]
			// (level and table count) are skipped.
			for idx, counter := range parts[2:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					llog.Error("Compaction entry parsing failed", "err", err)
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters (sizes are MB, times are seconds
		// in the table; converted here to bytes and nanoseconds).
		if db.diskSizeGauge != nil {
			db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024))
		}
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024))
		}
		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			llog.Error("Failed to read database write delay statistic", "err", err)
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		// NOTE(review): the sample above shows "Paused: false" with a space,
		// which this format string would not match — confirm against the
		// goleveldb version in use.
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			llog.Error("Write delay statistic not found")
			merr = err
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			llog.Error("Failed to parse delay duration", "err", err)
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
		}
		// If a warning that db is performing compaction has been displayed, any subsequent
		// warnings will be withheld for one minute not to overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
			llog.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}
		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			llog.Error("Failed to read database iostats", "err", err)
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			llog.Error("Bad syntax of ioStats", "ioStats", ioStats)
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			llog.Error("Bad syntax of read entry", "entry", parts[0])
			merr = err
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			llog.Error("Bad syntax of write entry", "entry", parts[1])
			merr = err
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		// Compaction-count gathering is disabled; it relies on a
		// "leveldb.compcount" property not provided by the vendored goleveldb.
		//compCount, err := db.db.GetProperty("leveldb.compcount")
		//if err != nil {
		//	llog.Error("Failed to read database iostats", "err", err)
		//	merr = err
		//	continue
		//}
		//
		//var (
		//	memComp       uint32
		//	level0Comp    uint32
		//	nonLevel0Comp uint32
		//	seekComp      uint32
		//)
		//if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil {
		//	llog.Error("Compaction count statistic not found")
		//	merr = err
		//	continue
		//}
		//db.memCompGauge.Update(int64(memComp))
		//db.level0CompGauge.Update(int64(level0Comp))
		//db.nonlevel0CompGauge.Update(int64(nonLevel0Comp))
		//db.seekCompGauge.Update(int64(seekComp))

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requesting, stop hammering the database
		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}
	if nil != merr {
		llog.Error("level-db meter error", "err", merr.Error())
	}

	// If the loop ended because of an error (not a quit request), block here
	// until Close asks us to stop, then deliver the terminal error.
	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}
   463  
// goLevelDBIt adapts a raw goleveldb iterator to the package Iterator
// interface; itBase supplies the range bounds and reverse flag used for
// key validity checks and traversal direction.
type goLevelDBIt struct {
	iterator.Iterator
	itBase
}
   468  
// Close releases the underlying goleveldb iterator's resources.
func (dbit *goLevelDBIt) Close() {
	dbit.Iterator.Release()
}
   473  
   474  //Next next
   475  func (dbit *goLevelDBIt) Next() bool {
   476  	if dbit.reverse {
   477  		return dbit.Iterator.Prev() && dbit.Valid()
   478  	}
   479  	return dbit.Iterator.Next() && dbit.Valid()
   480  }
   481  
   482  //Rewind ...
   483  func (dbit *goLevelDBIt) Rewind() bool {
   484  	if dbit.reverse {
   485  		return dbit.Iterator.Last() && dbit.Valid()
   486  	}
   487  	return dbit.Iterator.First() && dbit.Valid()
   488  }
   489  
// Value returns the current value; the slice is only valid until the
// iterator advances (use ValueCopy to retain it).
func (dbit *goLevelDBIt) Value() []byte {
	return dbit.Iterator.Value()
}
   493  
   494  func cloneByte(v []byte) []byte {
   495  	value := make([]byte, len(v))
   496  	copy(value, v)
   497  	return value
   498  }
   499  
   500  func (dbit *goLevelDBIt) ValueCopy() []byte {
   501  	v := dbit.Iterator.Value()
   502  	return cloneByte(v)
   503  }
   504  
// Valid reports whether the iterator is positioned on a key that is both
// valid to goleveldb and inside the configured range (itBase.checkKey).
func (dbit *goLevelDBIt) Valid() bool {
	return dbit.Iterator.Valid() && dbit.checkKey(dbit.Key())
}
   508  
// goLevelDBBatch buffers puts/deletes and applies them atomically in Write.
type goLevelDBBatch struct {
	db    *GoLevelDB
	batch *leveldb.Batch
	wop   *opt.WriteOptions
	size  int // accumulated bytes of queued keys+values (keys only for deletes)
	len   int // NOTE(review): Set adds len(value) while Delete adds 1, so this is not a pure entry count despite ValueLen's comment — verify intended semantics
}
   516  
   517  //NewBatch new
   518  func (db *GoLevelDB) NewBatch(sync bool) Batch {
   519  	batch := new(leveldb.Batch)
   520  	wop := &opt.WriteOptions{Sync: sync}
   521  	return &goLevelDBBatch{db, batch, wop, 0, 0}
   522  }
   523  
   524  func (mBatch *goLevelDBBatch) Set(key, value []byte) {
   525  	mBatch.batch.Put(key, value)
   526  	mBatch.size += len(key)
   527  	mBatch.size += len(value)
   528  	mBatch.len += len(value)
   529  }
   530  
   531  func (mBatch *goLevelDBBatch) Delete(key []byte) {
   532  	mBatch.batch.Delete(key)
   533  	mBatch.size += len(key)
   534  	mBatch.len++
   535  }
   536  
   537  func (mBatch *goLevelDBBatch) Write() error {
   538  	err := mBatch.db.db.Write(mBatch.batch, mBatch.wop)
   539  	if err != nil {
   540  		llog.Error("Write", "error", err)
   541  		return err
   542  	}
   543  	return nil
   544  }
   545  
// ValueSize returns the accumulated byte size of the queued operations.
func (mBatch *goLevelDBBatch) ValueSize() int {
	return mBatch.size
}
   549  
// ValueLen returns the batch's operation count. NOTE(review): Set
// currently grows this by len(value) rather than 1, so the value can
// exceed the true number of entries — confirm which semantics callers rely on.
func (mBatch *goLevelDBBatch) ValueLen() int {
	return mBatch.len
}
   554  
   555  func (mBatch *goLevelDBBatch) Reset() {
   556  	mBatch.batch.Reset()
   557  	mBatch.len = 0
   558  	mBatch.size = 0
   559  }
   560  
// UpdateWriteSync toggles fsync-on-write for subsequent Write calls.
func (mBatch *goLevelDBBatch) UpdateWriteSync(sync bool) {
	mBatch.wop.Sync = sync
}
   564  
// goLevelDBTx wraps a native goleveldb transaction as a TxKV.
type goLevelDBTx struct {
	tx *leveldb.Transaction
}
   568  
// Commit atomically applies the transaction's writes to the database.
func (db *goLevelDBTx) Commit() error {
	return db.tx.Commit()
}
   572  
// Rollback discards the transaction and all of its buffered writes.
func (db *goLevelDBTx) Rollback() {
	db.tx.Discard()
}
   576  
   577  //Get get in transaction
   578  func (db *goLevelDBTx) Get(key []byte) ([]byte, error) {
   579  	res, err := db.tx.Get(key, nil)
   580  	if err != nil {
   581  		if err == errors.ErrNotFound {
   582  			return nil, ErrNotFoundInDb
   583  		}
   584  		llog.Error("tx Get", "error", err)
   585  		return nil, err
   586  	}
   587  	return res, nil
   588  }
   589  
   590  //Set set in transaction
   591  func (db *goLevelDBTx) Set(key []byte, value []byte) error {
   592  	err := db.tx.Put(key, value, nil)
   593  	if err != nil {
   594  		llog.Error("tx Set", "error", err)
   595  		return err
   596  	}
   597  	return nil
   598  }
   599  
   600  //Iterator 迭代器 in transaction
   601  func (db *goLevelDBTx) Iterator(start []byte, end []byte, reverse bool) Iterator {
   602  	if end == nil {
   603  		end = bytesPrefix(start)
   604  	}
   605  	if bytes.Equal(end, types.EmptyValue) {
   606  		end = nil
   607  	}
   608  	r := &util.Range{Start: start, Limit: end}
   609  	it := db.tx.NewIterator(r, nil)
   610  	return &goLevelDBIt{it, itBase{start, end, reverse}}
   611  }
   612  
// Begin always panics: goleveldb transactions cannot be nested, so a
// TxKV cannot open a further transaction.
func (db *goLevelDBTx) Begin() {
	panic("Begin not impl")
}