github.com/intfoundation/intchain@v0.0.0-20220727031208-4316ad31ca73/intdb/leveldb/leveldb.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !js

// Package leveldb implements the key-value database layer based on LevelDB.
package leveldb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/intfoundation/intchain/common"
	"github.com/intfoundation/intchain/intdb"
	"github.com/intfoundation/intchain/log"
	"github.com/intfoundation/intchain/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	// degradationWarnInterval specifies how often a warning should be printed if the
	// leveldb database cannot keep up with requested writes.
	degradationWarnInterval = time.Minute

	// minCache is the minimum amount of memory in megabytes to allocate to leveldb
	// read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve leveldb database
	// compaction, io and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)

// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace in
// binary-alphabetical order.
type Database struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < minCache {
		cache = minCache
	}
	if handles < minHandles {
		handles = minHandles
	}
	logger := log.New("database", file)
	logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	if err != nil {
		return nil, err
	}
	// Assemble the wrapper with all the registered metrics
	ldb := &Database{
		fn:       file,
		db:       db,
		log:      logger,
		quitChan: make(chan chan error),
	}
	ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)

	// Start up the metrics gathering and return
	go ldb.meter(metricsGatheringInterval)
	return ldb, nil
}

// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (db *Database) Close() error {
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	err := db.db.Close()
	if err == nil {
		db.log.Info("Database closed")
	} else {
		db.log.Error("Failed to close database", "err", err)
	}
	return err
}

// Has retrieves if a key is present in the key-value store.
func (db *Database) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Delete removes the key from the key-value store.
func (db *Database) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}
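
// A minimal usage sketch of the key-value API above (the path, cache size,
// handle count, namespace, keys and values are illustrative assumptions only):
//
//	db, err := New("/tmp/testdb", 16, 16, "intchain/db/test/")
//	if err != nil {
//		// handle error
//	}
//	defer db.Close()
//
//	if err := db.Put([]byte("key"), []byte("value")); err != nil {
//		// handle error
//	}
//	val, err := db.Get([]byte("key")) // val == []byte("value") on success
//	ok, err := db.Has([]byte("key"))  // ok == true while the key is present
//	_ = db.Delete([]byte("key"))
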
// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (db *Database) NewBatch() intdb.Batch {
	return &batch{
		db: db.db,
		b:  new(leveldb.Batch),
	}
}

// NewIterator creates a binary-alphabetical iterator over the entire keyspace
// contained within the leveldb database.
func (db *Database) NewIterator() intdb.Iterator {
	return db.NewIteratorWithPrefix(nil)
}

// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix.
func (db *Database) NewIteratorWithPrefix(prefix []byte) intdb.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}

// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
	return db.db.GetProperty(property)
}

// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil, the
// entire data store is compacted.
func (db *Database) Compact(start []byte, limit []byte) error {
	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}

// Path returns the path to the database directory.
func (db *Database) Path() string {
	return db.fn
}

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a LevelDB stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
//   DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats look like (currently):
//   Read(MB):3895.04860 Write(MB):3654.64712
func (db *Database) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 3)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWritePaused time.Time
	)

	var (
		errc chan error
		merr error
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction leveldbTable not found")
			merr = errors.New("compaction leveldbTable not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the leveldbTable rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}

		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			db.log.Error("Write delay statistic not found")
			merr = err
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
		}
		// If a warning that the db is performing compaction has been displayed, any subsequent
		// warnings will be withheld for one minute so as not to overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
			db.log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}
		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			merr = err
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			merr = err
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requested, stop hammering the database
		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}

// batch is a write-only leveldb batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	return b.db.Write(b.b, nil)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w intdb.Writer) error {
	return b.b.Replay(&replayer{writer: w})
}

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
	writer  intdb.Writer
	failure error
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}
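
// A minimal usage sketch of the batch and iterator API above. The keys, values
// and prefix are illustrative assumptions, and the iterator calls assume the
// intdb.Iterator interface exposes Next/Key/Value/Release as goleveldb's
// iterator does (the concrete value returned by NewIteratorWithPrefix):
//
//	b := db.NewBatch()
//	_ = b.Put([]byte("h-1"), []byte("header-1"))
//	_ = b.Put([]byte("h-2"), []byte("header-2"))
//	_ = b.Delete([]byte("h-0"))
//	if err := b.Write(); err != nil {
//		// handle error
//	}
//
//	it := db.NewIteratorWithPrefix([]byte("h-"))
//	for it.Next() {
//		// it.Key() / it.Value() visit matching entries in binary-alphabetical order
//	}
//	it.Release()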