github.com/igggame/nebulas-go@v2.1.0+incompatible/storage/rocks_storage.go (about)

     1  package storage
     2  
     3  import (
     4  	"strconv"
     5  	"sync"
     6  	"time"
     7  
     8  	"github.com/nebulasio/go-nebulas/util/byteutils"
     9  	"github.com/syndtr/goleveldb/leveldb/opt"
    10  	"github.com/tecbot/gorocksdb"
    11  )
    12  
// RocksStorage the nodes in trie.
type RocksStorage struct {
	db          *gorocksdb.DB        // underlying RocksDB handle
	enableBatch bool                 // when true, Put/Del stage into batchOpts until Flush
	mutex       sync.Mutex           // guards batchOpts and the batch-mode state
	batchOpts   map[string]*batchOpt // pending ops keyed by hex-encoded key; last write per key wins

	ro *gorocksdb.ReadOptions  // shared read options, reused for every Get
	wo *gorocksdb.WriteOptions // shared write options, reused for every Put/Del/Write

	cache *gorocksdb.Cache // block cache handed to the DB; kept for metrics reporting
}
    25  
    26  // NewRocksStorage init a storage
    27  func NewRocksStorage(path string) (*RocksStorage, error) {
    28  
    29  	filter := gorocksdb.NewBloomFilter(10)
    30  	bbto := gorocksdb.NewDefaultBlockBasedTableOptions()
    31  	bbto.SetFilterPolicy(filter)
    32  
    33  	cache := gorocksdb.NewLRUCache(512 << 20)
    34  	bbto.SetBlockCache(cache)
    35  	opts := gorocksdb.NewDefaultOptions()
    36  	opts.SetBlockBasedTableFactory(bbto)
    37  	opts.SetCreateIfMissing(true)
    38  	opts.SetMaxOpenFiles(500)
    39  	opts.SetWriteBufferSize(64 * opt.MiB) //Default: 4MB
    40  	opts.IncreaseParallelism(4)           //flush and compaction thread
    41  	opts.SetKeepLogFileNum(1)
    42  
    43  	db, err := gorocksdb.OpenDb(opts, path)
    44  	if err != nil {
    45  		return nil, err
    46  	}
    47  
    48  	storage := &RocksStorage{
    49  		db:          db,
    50  		cache:       cache,
    51  		enableBatch: false,
    52  		batchOpts:   make(map[string]*batchOpt),
    53  		ro:          gorocksdb.NewDefaultReadOptions(),
    54  		wo:          gorocksdb.NewDefaultWriteOptions(),
    55  	}
    56  
    57  	//go RecordMetrics(storage)
    58  
    59  	return storage, nil
    60  }
    61  
    62  // Get return value to the key in Storage
    63  func (storage *RocksStorage) Get(key []byte) ([]byte, error) {
    64  
    65  	value, err := storage.db.GetBytes(storage.ro, key)
    66  
    67  	if err != nil {
    68  		return nil, err
    69  	}
    70  
    71  	if value == nil {
    72  		return nil, ErrKeyNotFound
    73  	}
    74  
    75  	return value, err
    76  }
    77  
    78  // Put put the key-value entry to Storage
    79  func (storage *RocksStorage) Put(key []byte, value []byte) error {
    80  	if storage.enableBatch {
    81  		storage.mutex.Lock()
    82  		defer storage.mutex.Unlock()
    83  
    84  		storage.batchOpts[byteutils.Hex(key)] = &batchOpt{
    85  			key:     key,
    86  			value:   value,
    87  			deleted: false,
    88  		}
    89  
    90  		return nil
    91  	}
    92  
    93  	return storage.db.Put(storage.wo, key, value)
    94  }
    95  
    96  // Del delete the key in Storage.
    97  func (storage *RocksStorage) Del(key []byte) error {
    98  	if storage.enableBatch {
    99  		storage.mutex.Lock()
   100  		defer storage.mutex.Unlock()
   101  
   102  		storage.batchOpts[byteutils.Hex(key)] = &batchOpt{
   103  			key:     key,
   104  			deleted: true,
   105  		}
   106  
   107  		return nil
   108  	}
   109  	return storage.db.Delete(storage.wo, key)
   110  }
   111  
   112  // Close levelDB
   113  func (storage *RocksStorage) Close() error {
   114  	storage.db.Close()
   115  	return nil
   116  }
   117  
   118  // EnableBatch enable batch write.
   119  func (storage *RocksStorage) EnableBatch() {
   120  	storage.enableBatch = true
   121  }
   122  
   123  // Flush write and flush pending batch write.
   124  func (storage *RocksStorage) Flush() error {
   125  	storage.mutex.Lock()
   126  	defer storage.mutex.Unlock()
   127  
   128  	if !storage.enableBatch {
   129  		return nil
   130  	}
   131  
   132  	startAt := time.Now().UnixNano()
   133  
   134  	wb := gorocksdb.NewWriteBatch()
   135  	defer wb.Destroy()
   136  
   137  	bl := len(storage.batchOpts)
   138  
   139  	for _, opt := range storage.batchOpts {
   140  		if opt.deleted {
   141  			wb.Delete(opt.key)
   142  		} else {
   143  			wb.Put(opt.key, opt.value)
   144  		}
   145  	}
   146  	storage.batchOpts = make(map[string]*batchOpt)
   147  
   148  	err := storage.db.Write(storage.wo, wb)
   149  
   150  	endAt := time.Now().UnixNano()
   151  	metricsRocksdbFlushTime.Update(endAt - startAt)
   152  	metricsRocksdbFlushLen.Update(int64(bl))
   153  
   154  	return err
   155  }
   156  
   157  // DisableBatch disable batch write.
   158  func (storage *RocksStorage) DisableBatch() {
   159  	storage.mutex.Lock()
   160  	defer storage.mutex.Unlock()
   161  	storage.batchOpts = make(map[string]*batchOpt)
   162  
   163  	storage.enableBatch = false
   164  }
   165  
   166  // RecordMetrics record rocksdb metrics
   167  func RecordMetrics(storage *RocksStorage) {
   168  	metricsUpdateChan := time.NewTicker(5 * time.Second).C
   169  
   170  	for {
   171  		select {
   172  		case <-metricsUpdateChan:
   173  
   174  			readersMemStr := storage.db.GetProperty("rocksdb.estimate-table-readers-mem")
   175  			allMemTablesStr := storage.db.GetProperty("rocksdb.cur-size-all-mem-tables")
   176  			cacheSize := storage.cache.GetUsage()
   177  			pinnedSize := storage.cache.GetPinnedUsage()
   178  
   179  			readersMem, err := strconv.Atoi(readersMemStr)
   180  			if err != nil {
   181  				break
   182  			}
   183  			allMemTables, err := strconv.Atoi(allMemTablesStr)
   184  			if err != nil {
   185  				break
   186  			}
   187  
   188  			metricsBlocksdbAllMemTables.Update(int64(allMemTables))
   189  			metricsBlocksdbTableReaderMem.Update(int64(readersMem))
   190  			metricsBlocksdbCacheSize.Update(int64(cacheSize))
   191  			metricsBlocksdbCachePinnedSize.Update(int64(pinnedSize))
   192  		}
   193  	}
   194  }