github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/ethdb/database.go

// Copyright 2014 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

package ethdb

import (
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/log"
	"github.com/SmartMeshFoundation/Spectrum/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"

	gometrics "github.com/rcrowley/go-metrics"
)

// OpenFileLimit is the maximum number of files the database keeps open.
var OpenFileLimit = 32

// LDBDatabase is a LevelDB-backed key/value store with optional metrics.
type LDBDatabase struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	getTimer       gometrics.Timer // Timer for measuring the database get request counts and latencies
	putTimer       gometrics.Timer // Timer for measuring the database put request counts and latencies
	delTimer       gometrics.Timer // Timer for measuring the database delete request counts and latencies
	missMeter      gometrics.Meter // Meter for measuring the missed database get requests
	readMeter      gometrics.Meter // Meter for measuring the database get request data usage
	writeMeter     gometrics.Meter // Meter for measuring the database put request data usage
	compTimeMeter  gometrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter  gometrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter gometrics.Meter // Meter for measuring the data written during compaction

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}
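// Example (a minimal sketch): opening a database, writing a value and closing
// it again. The path and the cache/handle sizing below are illustrative only.
//
//	db, err := NewLDBDatabase("/tmp/chaindata", 128, 1024)
//	if err != nil {
//		return err // opening (or recovering) the store failed
//	}
//	defer db.Close()
//	if err := db.Put([]byte("key"), []byte("value")); err != nil {
//		return err
//	}
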
// NewLDBDatabase returns a LevelDB wrapped object.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
	logger := log.New("database", file)

	// Ensure we have some minimal caching and file guarantees
	// default cache = 128
	//if cache < 16 {
	//	cache = 16
	//}
	//if handles < 8 {
	//	handles = 8
	//}

	logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: 8,
		// default cache == 128
		BlockCacheCapacity: 8 * opt.MiB, // default: 128 / 2 = 64
		WriteBuffer:        8 * opt.MiB, // Two of these are used internally, default: 128 / 4 = 32
		Filter:             filter.NewBloomFilter(20),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &LDBDatabase{
		fn:  file,
		db:  db,
		log: logger,
	}, nil
}

// Path returns the path to the database directory.
func (db *LDBDatabase) Path() string {
	return db.fn
}

// Put inserts the given key/value pair into the database.
func (db *LDBDatabase) Put(key []byte, value []byte) error {
	// Measure the database put latency, if requested
	if db.putTimer != nil {
		defer db.putTimer.UpdateSince(time.Now())
	}
	// Generate the data to write to disk, update the meter and write
	//value = rle.Compress(value)

	if db.writeMeter != nil {
		db.writeMeter.Mark(int64(len(value)))
	}
	return db.db.Put(key, value, nil)
}

// Has reports whether the given key is present in the database.
func (db *LDBDatabase) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}
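// Example (sketch): distinguishing a missing key from a read failure. Get
// passes through goleveldb's error, so an absent key surfaces as the
// leveldb.ErrNotFound sentinel and callers can compare against it instead of
// probing with Has first.
//
//	if _, err := db.Get(key); err == leveldb.ErrNotFound {
//		// key is absent (also counted by missMeter when metering is on)
//	}
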
// Get returns the given key if it's present.
func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
	// Measure the database get latency, if requested
	if db.getTimer != nil {
		defer db.getTimer.UpdateSince(time.Now())
	}
	// Retrieve the key and increment the miss counter if not found
	dat, err := db.db.Get(key, nil)
	if err != nil {
		if db.missMeter != nil {
			db.missMeter.Mark(1)
		}
		return nil, err
	}
	// Otherwise update the actually retrieved amount of data
	if db.readMeter != nil {
		db.readMeter.Mark(int64(len(dat)))
	}
	return dat, nil
	//return rle.Decompress(dat)
}

// Delete deletes the given key from the database.
func (db *LDBDatabase) Delete(key []byte) error {
	// Measure the database delete latency, if requested
	if db.delTimer != nil {
		defer db.delTimer.UpdateSince(time.Now())
	}
	// Execute the actual operation
	return db.db.Delete(key, nil)
}

// NewIterator returns an iterator over the entire keyspace.
func (db *LDBDatabase) NewIterator() iterator.Iterator {
	return db.db.NewIterator(nil, nil)
}

// Close stops the metrics collection and closes the underlying LevelDB store.
func (db *LDBDatabase) Close() {
	// Stop the metrics collection to avoid internal database races
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
	}
	err := db.db.Close()
	if err == nil {
		db.log.Info("Database closed")
	} else {
		db.log.Error("Failed to close database", "err", err)
	}
}

// LDB exposes the underlying LevelDB instance.
func (db *LDBDatabase) LDB() *leveldb.DB {
	return db.db
}

// Meter configures the database metrics collectors and starts the periodic
// stats collection goroutine.
func (db *LDBDatabase) Meter(prefix string) {
	// Short circuit metering if the metrics system is disabled
	if !metrics.Enabled {
		return
	}
	// Initialize all the metrics collectors at the requested prefix
	db.getTimer = metrics.NewTimer(prefix + "user/gets")
	db.putTimer = metrics.NewTimer(prefix + "user/puts")
	db.delTimer = metrics.NewTimer(prefix + "user/dels")
	db.missMeter = metrics.NewMeter(prefix + "user/misses")
	db.readMeter = metrics.NewMeter(prefix + "user/reads")
	db.writeMeter = metrics.NewMeter(prefix + "user/writes")
	db.compTimeMeter = metrics.NewMeter(prefix + "compact/time")
	db.compReadMeter = metrics.NewMeter(prefix + "compact/input")
	db.compWriteMeter = metrics.NewMeter(prefix + "compact/output")

	// Create a quit channel for the periodic collector and run it
	db.quitLock.Lock()
	db.quitChan = make(chan chan error)
	db.quitLock.Unlock()

	go db.meter(3 * time.Second)
}
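// Example (a minimal sketch): enabling collection after opening the database.
// The prefix is free-form; "spectrum/db/chaindata/" below is illustrative, and
// the call is a no-op while the metrics system is disabled.
//
//	db.Meter("spectrum/db/chaindata/")
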
// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is how a stats table looks like (currently):
//
//	Compactions
//	 Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//	-------+------------+---------------+---------------+---------------+---------------
//	   0   |      0     |       0.00000 |       1.27969 |       0.00000 |      12.31098
//	   1   |     85     |     109.27913 |      28.09293 |     213.92493 |     214.26294
//	   2   |    523     |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//	   3   |    570     |    1113.18458 |       0.00000 |       0.00000 |       0.00000
func (db *LDBDatabase) meter(refresh time.Duration) {
	// Create the counters to store current and previous values
	counters := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		counters[i] = make([]float64, 3)
	}
	// Iterate ad infinitum and collect the stats
	for i := 1; ; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			return
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			return
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(counters[i%2]); j++ {
			counters[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					return
				}
				counters[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((counters[i%2][0] - counters[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((counters[i%2][1] - counters[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((counters[i%2][2] - counters[(i-1)%2][2]) * 1024 * 1024))
		}
		// Sleep a bit, then repeat the stats collection
		select {
		case errc := <-db.quitChan:
			// Quit requested, stop hammering the database
			errc <- nil
			return

		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}
}

// NewBatch returns a write batch backed by this database.
func (db *LDBDatabase) NewBatch() Batch {
	return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}

type ldbBatch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put queues a key/value write in the batch.
func (b *ldbBatch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Write flushes the queued writes to the database in one atomic operation.
func (b *ldbBatch) Write() error {
	return b.db.Write(b.b, nil)
}

// ValueSize returns the total byte size of the values queued so far.
func (b *ldbBatch) ValueSize() int {
	return b.size
}
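// Example (sketch): batching related writes so they hit disk atomically. The
// keys, blobs and the flush threshold below are illustrative placeholders.
//
//	batch := db.NewBatch()
//	_ = batch.Put([]byte("a"), blobA)
//	_ = batch.Put([]byte("b"), blobB)
//	if batch.ValueSize() > opt.MiB {
//		// optionally flush early once enough value bytes are queued
//	}
//	if err := batch.Write(); err != nil {
//		return err
//	}
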
type table struct {
	db     Database
	prefix string
}

// NewTable returns a Database object that prefixes all keys with a given
// string.
func NewTable(db Database, prefix string) Database {
	return &table{
		db:     db,
		prefix: prefix,
	}
}

func (dt *table) Put(key []byte, value []byte) error {
	return dt.db.Put(append([]byte(dt.prefix), key...), value)
}

func (dt *table) Has(key []byte) (bool, error) {
	return dt.db.Has(append([]byte(dt.prefix), key...))
}

func (dt *table) Get(key []byte) ([]byte, error) {
	return dt.db.Get(append([]byte(dt.prefix), key...))
}

func (dt *table) Delete(key []byte) error {
	return dt.db.Delete(append([]byte(dt.prefix), key...))
}

func (dt *table) Close() {
	// Do nothing; don't close the underlying DB.
}

type tableBatch struct {
	batch  Batch
	prefix string
}

// NewTableBatch returns a Batch object which prefixes all keys with a given string.
func NewTableBatch(db Database, prefix string) Batch {
	return &tableBatch{db.NewBatch(), prefix}
}

func (dt *table) NewBatch() Batch {
	return &tableBatch{dt.db.NewBatch(), dt.prefix}
}

func (tb *tableBatch) Put(key, value []byte) error {
	return tb.batch.Put(append([]byte(tb.prefix), key...), value)
}

func (tb *tableBatch) Write() error {
	return tb.batch.Write()
}

func (tb *tableBatch) ValueSize() int {
	return tb.batch.ValueSize()
}
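// Example (sketch): carving two logical namespaces out of one backing store.
// The "h"/"b" prefixes and the hash/headerRLP values below are illustrative
// placeholders, and the sketch assumes the Database interface exposes NewBatch
// (which table implements above).
//
//	headers := NewTable(db, "h")
//	bodies := NewTable(db, "b")
//	_ = headers.Put(hash, headerRLP) // stored under "h" + hash
//	batch := headers.NewBatch()      // batched writes share the same prefix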