github.com/neatio-net/neatio@v1.7.3-0.20231114194659-f4d7a2226baa/neatdb/leveldb/leveldb.go (about)

     1  package leveldb
     2  
     3  import (
     4  	"fmt"
     5  	"strconv"
     6  	"strings"
     7  	"sync"
     8  	"time"
     9  
    10  	"github.com/neatio-net/neatio/chain/log"
    11  	"github.com/neatio-net/neatio/neatdb"
    12  	"github.com/neatio-net/neatio/utilities/metrics"
    13  	"github.com/syndtr/goleveldb/leveldb"
    14  	"github.com/syndtr/goleveldb/leveldb/errors"
    15  	"github.com/syndtr/goleveldb/leveldb/filter"
    16  	"github.com/syndtr/goleveldb/leveldb/opt"
    17  	"github.com/syndtr/goleveldb/leveldb/util"
    18  )
    19  
const (
	// degradationWarnInterval is the minimum interval between warnings that
	// the database is compacting with degraded performance (see meter).
	degradationWarnInterval = time.Minute

	// minCache is the lower bound enforced on the cache allowance passed to
	// New; the value is interpreted in MiB (see the opt.MiB scaling in New).
	minCache = 16

	// minHandles is the lower bound enforced on the open-file allowance
	// passed to New.
	minHandles = 16

	// metricsGatheringInterval is the refresh period handed to the metrics
	// gathering goroutine started by New.
	metricsGatheringInterval = 3 * time.Second
)
    29  
// Database is a persistent key-value store backed by a goleveldb instance.
// Besides the basic data operations it periodically gathers compaction,
// write-delay and disk I/O statistics from the underlying store and feeds
// them into the registered meters (see meter).
type Database struct {
	fn string      // filename/path the database was opened with
	db *leveldb.DB // underlying goleveldb instance

	compTimeMeter    metrics.Meter // time spent in compaction (from "leveldb.stats")
	compReadMeter    metrics.Meter // data read during compaction
	compWriteMeter   metrics.Meter // data written during compaction
	writeDelayNMeter metrics.Meter // write-delay event count (from "leveldb.writedelay")
	writeDelayMeter  metrics.Meter // cumulative write-delay duration
	diskReadMeter    metrics.Meter // disk reads (from "leveldb.iostats")
	diskWriteMeter   metrics.Meter // disk writes

	quitLock sync.Mutex      // guards quitChan against concurrent Close calls
	quitChan chan chan error // used by Close to stop the meter goroutine; nil once stopped

	log log.Logger // contextual logger tagged with the database path
}
    47  
    48  func New(file string, cache int, handles int, namespace string) (*Database, error) {
    49  
    50  	if cache < minCache {
    51  		cache = minCache
    52  	}
    53  	if handles < minHandles {
    54  		handles = minHandles
    55  	}
    56  	logger := log.New("database", file)
    57  
    58  	db, err := leveldb.OpenFile(file, &opt.Options{
    59  		OpenFilesCacheCapacity: handles,
    60  		BlockCacheCapacity:     cache / 2 * opt.MiB,
    61  		WriteBuffer:            cache / 4 * opt.MiB,
    62  		Filter:                 filter.NewBloomFilter(10),
    63  	})
    64  	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
    65  		db, err = leveldb.RecoverFile(file, nil)
    66  	}
    67  	if err != nil {
    68  		return nil, err
    69  	}
    70  
    71  	ldb := &Database{
    72  		fn:       file,
    73  		db:       db,
    74  		log:      logger,
    75  		quitChan: make(chan chan error),
    76  	}
    77  	ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
    78  	ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
    79  	ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
    80  	ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
    81  	ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
    82  	ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
    83  	ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
    84  
    85  	go ldb.meter(metricsGatheringInterval)
    86  	return ldb, nil
    87  }
    88  
    89  func (db *Database) Close() error {
    90  	db.quitLock.Lock()
    91  	defer db.quitLock.Unlock()
    92  
    93  	if db.quitChan != nil {
    94  		errc := make(chan error)
    95  		db.quitChan <- errc
    96  		if err := <-errc; err != nil {
    97  			db.log.Error("Metrics collection failed", "err", err)
    98  		}
    99  		db.quitChan = nil
   100  	}
   101  	err := db.db.Close()
   102  	if err == nil {
   103  		db.log.Info("Database closed")
   104  	} else {
   105  		db.log.Error("Failed to close database", "err", err)
   106  	}
   107  	return err
   108  }
   109  
// Has reports whether a value is stored under the given key.
func (db *Database) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}
   113  
   114  func (db *Database) Get(key []byte) ([]byte, error) {
   115  	dat, err := db.db.Get(key, nil)
   116  	if err != nil {
   117  		return nil, err
   118  	}
   119  	return dat, nil
   120  }
   121  
// Put stores the given value under the given key, overwriting any
// previous entry.
func (db *Database) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}
   125  
// Delete removes the entry stored under the given key, if any.
func (db *Database) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}
   129  
   130  func (db *Database) NewBatch() neatdb.Batch {
   131  	return &batch{
   132  		db: db.db,
   133  		b:  new(leveldb.Batch),
   134  	}
   135  }
   136  
// NewIterator creates an iterator over the entire key space (implemented as
// a prefix iterator with a nil prefix).
func (db *Database) NewIterator() neatdb.Iterator {
	return db.NewIteratorWithPrefix(nil)
}
   140  
// NewIteratorWithPrefix creates an iterator over the subset of keys that
// start with the given prefix; a nil prefix covers all keys.
func (db *Database) NewIteratorWithPrefix(prefix []byte) neatdb.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}
   144  
// Stat returns the value of the requested internal leveldb property
// (e.g. "leveldb.stats").
func (db *Database) Stat(property string) (string, error) {
	return db.db.GetProperty(property)
}
   148  
// Compact flattens the underlying data for the key range [start, limit);
// per goleveldb semantics, nil bounds extend the range to either end of
// the key space.
func (db *Database) Compact(start []byte, limit []byte) error {
	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}
   152  
// Path returns the filesystem path the database was opened with.
func (db *Database) Path() string {
	return db.fn
}
   156  
   157  func (db *Database) meter(refresh time.Duration) {
   158  
   159  	compactions := make([][]float64, 2)
   160  	for i := 0; i < 2; i++ {
   161  		compactions[i] = make([]float64, 3)
   162  	}
   163  
   164  	var iostats [2]float64
   165  
   166  	var (
   167  		delaystats      [2]int64
   168  		lastWritePaused time.Time
   169  	)
   170  
   171  	var (
   172  		errc chan error
   173  		merr error
   174  	)
   175  
   176  	for i := 1; errc == nil && merr == nil; i++ {
   177  
   178  		stats, err := db.db.GetProperty("leveldb.stats")
   179  		if err != nil {
   180  			db.log.Error("Failed to read database stats", "err", err)
   181  			merr = err
   182  			continue
   183  		}
   184  
   185  		lines := strings.Split(stats, "\n")
   186  		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
   187  			lines = lines[1:]
   188  		}
   189  		if len(lines) <= 3 {
   190  			db.log.Error("Compaction leveldbTable not found")
   191  			merr = errors.New("compaction leveldbTable not found")
   192  			continue
   193  		}
   194  		lines = lines[3:]
   195  
   196  		for j := 0; j < len(compactions[i%2]); j++ {
   197  			compactions[i%2][j] = 0
   198  		}
   199  		for _, line := range lines {
   200  			parts := strings.Split(line, "|")
   201  			if len(parts) != 6 {
   202  				break
   203  			}
   204  			for idx, counter := range parts[3:] {
   205  				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
   206  				if err != nil {
   207  					db.log.Error("Compaction entry parsing failed", "err", err)
   208  					merr = err
   209  					continue
   210  				}
   211  				compactions[i%2][idx] += value
   212  			}
   213  		}
   214  
   215  		if db.compTimeMeter != nil {
   216  			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
   217  		}
   218  		if db.compReadMeter != nil {
   219  			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
   220  		}
   221  		if db.compWriteMeter != nil {
   222  			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
   223  		}
   224  
   225  		writedelay, err := db.db.GetProperty("leveldb.writedelay")
   226  		if err != nil {
   227  			db.log.Error("Failed to read database write delay statistic", "err", err)
   228  			merr = err
   229  			continue
   230  		}
   231  		var (
   232  			delayN        int64
   233  			delayDuration string
   234  			duration      time.Duration
   235  			paused        bool
   236  		)
   237  		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
   238  			db.log.Error("Write delay statistic not found")
   239  			merr = err
   240  			continue
   241  		}
   242  		duration, err = time.ParseDuration(delayDuration)
   243  		if err != nil {
   244  			db.log.Error("Failed to parse delay duration", "err", err)
   245  			merr = err
   246  			continue
   247  		}
   248  		if db.writeDelayNMeter != nil {
   249  			db.writeDelayNMeter.Mark(delayN - delaystats[0])
   250  		}
   251  		if db.writeDelayMeter != nil {
   252  			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
   253  		}
   254  
   255  		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
   256  			time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
   257  			db.log.Warn("Database compacting, degraded performance")
   258  			lastWritePaused = time.Now()
   259  		}
   260  		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
   261  
   262  		ioStats, err := db.db.GetProperty("leveldb.iostats")
   263  		if err != nil {
   264  			db.log.Error("Failed to read database iostats", "err", err)
   265  			merr = err
   266  			continue
   267  		}
   268  		var nRead, nWrite float64
   269  		parts := strings.Split(ioStats, " ")
   270  		if len(parts) < 2 {
   271  			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
   272  			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
   273  			continue
   274  		}
   275  		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
   276  			db.log.Error("Bad syntax of read entry", "entry", parts[0])
   277  			merr = err
   278  			continue
   279  		}
   280  		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
   281  			db.log.Error("Bad syntax of write entry", "entry", parts[1])
   282  			merr = err
   283  			continue
   284  		}
   285  		if db.diskReadMeter != nil {
   286  			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
   287  		}
   288  		if db.diskWriteMeter != nil {
   289  			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
   290  		}
   291  		iostats[0], iostats[1] = nRead, nWrite
   292  
   293  		select {
   294  		case errc = <-db.quitChan:
   295  
   296  		case <-time.After(refresh):
   297  
   298  		}
   299  	}
   300  
   301  	if errc == nil {
   302  		errc = <-db.quitChan
   303  	}
   304  	errc <- merr
   305  }
   306  
// batch is a write-only buffer of key/value changes that is committed to the
// database atomically when Write is called. It implements neatdb.Batch.
type batch struct {
	db   *leveldb.DB    // destination database for Write
	b    *leveldb.Batch // buffered operations
	size int            // queued data size: value bytes per Put, one per Delete
}
   312  
// Put queues a key/value insert into the batch; only the value length is
// counted towards ValueSize.
func (b *batch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}
   318  
// Delete queues a key removal into the batch, counting one byte towards
// ValueSize.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}
   324  
// ValueSize returns the amount of data queued up for writing (as tallied by
// Put and Delete).
func (b *batch) ValueSize() int {
	return b.size
}
   328  
// Write flushes every queued operation to the database in one atomic batch.
func (b *batch) Write() error {
	return b.db.Write(b.b, nil)
}
   332  
// Reset discards the queued operations so the batch can be reused.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}
   337  
// Replay feeds the queued operations, in order, into the given writer.
func (b *batch) Replay(w neatdb.Writer) error {
	return b.b.Replay(&replayer{writer: w})
}
   341  
// replayer adapts a neatdb.Writer to goleveldb's batch replay callbacks,
// remembering the first write failure and skipping every operation after it.
type replayer struct {
	writer  neatdb.Writer // destination for the replayed operations
	failure error         // first error returned by the writer, if any
}
   346  
   347  func (r *replayer) Put(key, value []byte) {
   348  
   349  	if r.failure != nil {
   350  		return
   351  	}
   352  	r.failure = r.writer.Put(key, value)
   353  }
   354  
   355  func (r *replayer) Delete(key []byte) {
   356  
   357  	if r.failure != nil {
   358  		return
   359  	}
   360  	r.failure = r.writer.Delete(key)
   361  }