github.com/Gessiux/neatchain@v1.3.1/neatdb/leveldb/leveldb.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !js

// Package leveldb implements the key-value database layer based on LevelDB.
package leveldb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/Gessiux/neatchain/chain/log"
	"github.com/Gessiux/neatchain/neatdb"
	"github.com/Gessiux/neatchain/utilities/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	// degradationWarnInterval specifies how often a warning should be printed if the
	// leveldb database cannot keep up with the requested writes.
	degradationWarnInterval = time.Minute

	// minCache is the minimum amount of memory in megabytes to allocate to leveldb
	// read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve leveldb database
	// compaction, io and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)

// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace in
// binary-alphabetical order.
type Database struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < minCache {
		cache = minCache
	}
	if handles < minHandles {
		handles = minHandles
	}
	logger := log.New("database", file)
	//logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	if err != nil {
		return nil, err
	}
	// Assemble the wrapper with all the registered metrics
	ldb := &Database{
		fn:       file,
		db:       db,
		log:      logger,
		quitChan: make(chan chan error),
	}
	ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)

	// Start up the metrics gathering and return
	go ldb.meter(metricsGatheringInterval)
	return ldb, nil
}

// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (db *Database) Close() error {
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	err := db.db.Close()
	if err == nil {
		db.log.Info("Database closed")
	} else {
		db.log.Error("Failed to close database", "err", err)
	}
	return err
}

// Has reports whether a key is present in the key-value store.
func (db *Database) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Delete removes the key from the key-value store.
func (db *Database) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (db *Database) NewBatch() neatdb.Batch {
	return &batch{
		db: db.db,
		b:  new(leveldb.Batch),
	}
}
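
// A minimal usage sketch, not part of the original file: opening a database,
// doing a direct Put/Get, and committing a buffered batch. The path, cache
// size (in MB), handle count and metrics namespace below are illustrative
// placeholders, and error handling is elided for brevity.
//
//	db, err := New("/var/data/neatchain/chaindata", 128, 512, "neat/db/chaindata/")
//	if err != nil {
//		// handle the open/recovery failure
//	}
//	defer db.Close()
//
//	_ = db.Put([]byte("key"), []byte("value")) // direct write
//	val, _ := db.Get([]byte("key"))            // val == []byte("value")
//	_ = val
//
//	// Batched writes are buffered in memory and only hit disk on Write.
//	b := db.NewBatch()
//	_ = b.Put([]byte("a"), []byte("1"))
//	_ = b.Delete([]byte("key"))
//	_ = b.Write()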

// NewIterator creates a binary-alphabetical iterator over the entire keyspace
// contained within the leveldb database.
func (db *Database) NewIterator() neatdb.Iterator {
	return db.NewIteratorWithPrefix(nil)
}

// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix.
func (db *Database) NewIteratorWithPrefix(prefix []byte) neatdb.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}

// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
	return db.db.GetProperty(property)
}

// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil, the
// entire data store will be compacted.
func (db *Database) Compact(start []byte, limit []byte) error {
	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}

// Path returns the path to the database directory.
func (db *Database) Path() string {
	return db.fn
}
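
// A minimal iteration and maintenance sketch, not part of the original file.
// It assumes neatdb.Iterator mirrors the goleveldb iterator API (Next, Key,
// Value, Release, Error), which is what NewIteratorWithPrefix returns here;
// the "acct-" prefix and the process helper are hypothetical placeholders.
//
//	it := db.NewIteratorWithPrefix([]byte("acct-"))
//	for it.Next() {
//		// Keys arrive in binary-alphabetical order within the prefix.
//		process(it.Key(), it.Value())
//	}
//	it.Release()
//	if err := it.Error(); err != nil {
//		// handle the iteration failure
//	}
//
//	// Compact the whole store: a nil start and limit cover every key.
//	_ = db.Compact(nil, nil)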

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a LevelDB stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
//   DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats look like (currently):
//   Read(MB):3895.04860 Write(MB):3654.64712
func (db *Database) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 3)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWritePaused time.Time
	)

	var (
		errc chan error
		merr error
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			merr = errors.New("compaction table not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the compaction table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}

		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			db.log.Error("Write delay statistic not found")
			merr = err
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
		}
		// If a warning that the db is performing compaction has been displayed, any
		// subsequent warnings will be withheld for one minute so as not to overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
			db.log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}
		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			merr = err
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			merr = err
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requesting, stop hammering the database
		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}

// batch is a write-only leveldb batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	return b.db.Write(b.b, nil)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w neatdb.Writer) error {
	return b.b.Replay(&replayer{writer: w})
}

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
	writer  neatdb.Writer
	failure error
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}
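
// A minimal replay sketch, not part of the original file: Replay re-applies a
// batch's buffered operations, in order, to any neatdb.Writer, e.g. to mirror
// pending writes into a second store. src and dst are hypothetical databases
// opened with New (assuming *Database satisfies neatdb.Writer via its Put and
// Delete methods); once a write on dst fails, the replayer skips the rest.
//
//	b := src.NewBatch()
//	_ = b.Put([]byte("k"), []byte("v"))
//	_ = b.Delete([]byte("stale"))
//	_ = b.Write()     // commit the buffered ops to src
//	_ = b.Replay(dst) // re-apply the same ops to dst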