github.com/cyberliem/go-ethereum@v1.8.17-0.20190531093028-7a22da98b9f8/ethdb/leveldb/leveldb.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !js

// Package leveldb implements the key-value database layer based on LevelDB.
package leveldb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	// degradationWarnInterval specifies how often a warning should be printed
	// if the leveldb database cannot keep up with the requested writes.
	degradationWarnInterval = time.Minute

	// minCache is the minimum amount of memory in megabytes to allocate to
	// leveldb read and write caching, split half and half.
	minCache = 16

	// minHandles is the minimum number of file handles to allocate to the open
	// database files.
	minHandles = 16

	// metricsGatheringInterval specifies the interval to retrieve leveldb
	// database compaction, io and pause stats to report to the user.
	metricsGatheringInterval = 3 * time.Second
)

// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace
// in binary-alphabetical order.
type Database struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
	// Ensure we have some minimal caching and file guarantees
	if cache < minCache {
		cache = minCache
	}
	if handles < minHandles {
		handles = minHandles
	}
	logger := log.New("database", file)
	logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	if err != nil {
		return nil, err
	}
	// Assemble the wrapper with all the registered metrics
	ldb := &Database{
		fn:       file,
		db:       db,
		log:      logger,
		quitChan: make(chan chan error),
	}
	ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
	ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
	ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
	ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
	ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
	ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
	ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)

	// Start up the metrics gathering and return
	go ldb.meter(metricsGatheringInterval)
	return ldb, nil
}

// Close stops the metrics collection, flushes any pending data to disk and
// closes all io accesses to the underlying key-value store.
func (db *Database) Close() error {
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	return db.db.Close()
}
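
// The sketch below is illustrative and not part of the original file: it shows
// how a store is typically opened with New and torn down again. The path,
// cache/handle counts and metrics namespace are hypothetical.
func exampleOpenClose() error {
	// Requests below minCache/minHandles are silently raised to the minimums.
	db, err := New("/tmp/example-chaindata", 128, 1024, "eth/db/example/")
	if err != nil {
		return err
	}
	// Close stops the metrics goroutine and then closes the underlying store.
	return db.Close()
}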

// Has reports whether a key is present in the key-value store.
func (db *Database) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Delete removes the key from the key-value store.
func (db *Database) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}
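
// Illustrative sketch (not part of the original file): basic single-key
// operations against the store. The keys and values are hypothetical.
func exampleSingleOps(db *Database) error {
	// Insert a value, then read it back.
	if err := db.Put([]byte("foo"), []byte("bar")); err != nil {
		return err
	}
	if val, err := db.Get([]byte("foo")); err != nil {
		return err
	} else if string(val) != "bar" {
		return fmt.Errorf("unexpected value %q", val)
	}
	// Has checks existence without handing back the value.
	if ok, err := db.Has([]byte("foo")); err != nil {
		return err
	} else if ok {
		return db.Delete([]byte("foo"))
	}
	return nil
}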

// NewBatch creates a write-only key-value store that buffers changes to its
// host database until a final write is called.
func (db *Database) NewBatch() ethdb.Batch {
	return &batch{
		db: db.db,
		b:  new(leveldb.Batch),
	}
}

// NewIterator creates a binary-alphabetical iterator over the entire keyspace
// contained within the leveldb database.
func (db *Database) NewIterator() ethdb.Iterator {
	return db.db.NewIterator(new(util.Range), nil)
}

// NewIteratorWithStart creates a binary-alphabetical iterator over a subset of
// database content starting at a particular initial key (or after, if it does
// not exist).
func (db *Database) NewIteratorWithStart(start []byte) ethdb.Iterator {
	return db.db.NewIterator(&util.Range{Start: start}, nil)
}

// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix.
func (db *Database) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}

// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
	return db.db.GetProperty(property)
}

// Compact flattens the underlying data store for the given key range. In
// essence, deleted and overwritten versions are discarded, and the data is
// rearranged to reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil
// limit is treated as a key after all keys in the data store. If both are nil,
// the entire data store is compacted.
func (db *Database) Compact(start []byte, limit []byte) error {
	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}

// Path returns the path to the database directory.
func (db *Database) Path() string {
	return db.fn
}
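
// Illustrative sketch (not part of the original file): scanning every key
// under a hypothetical prefix. Per the iterator contract, Release must always
// be called and Error consulted once iteration stops.
func examplePrefixScan(db *Database, prefix []byte) error {
	it := db.NewIteratorWithPrefix(prefix)
	defer it.Release()

	for it.Next() {
		// The slices returned by Key/Value are only valid until the next
		// call to Next, so copy them if they need to outlive this iteration.
		_, _ = it.Key(), it.Value()
	}
	return it.Error()
}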

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a LevelDB stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
//   DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats look like (currently):
//   Read(MB):3895.04860 Write(MB):3654.64712
func (db *Database) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 3)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWritePaused time.Time
	)

	var (
		errc chan error
		merr error
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction leveldbTable not found")
			merr = errors.New("compaction leveldbTable not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the leveldbTable rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}

		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			db.log.Error("Write delay statistic not found")
			merr = err
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
		}
		// If a warning that db is performing compaction has been displayed, any subsequent
		// warnings will be withheld for one minute not to overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
			db.log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}
		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			merr = err
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			merr = err
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requesting, stop hammering the database
		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}
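
// Illustrative sketch (not part of the original file): the same goleveldb
// properties that meter polls above can also be queried on demand through
// Stat, for instance when debugging a misbehaving node.
func exampleProperties(db *Database) {
	for _, prop := range []string{"leveldb.stats", "leveldb.writedelay", "leveldb.iostats"} {
		if value, err := db.Stat(prop); err == nil {
			fmt.Printf("%s:\n%s\n", prop, value)
		}
	}
}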

// batch is a write-only leveldb batch that commits changes to its host
// database when Write is called. A batch cannot be used concurrently.
type batch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
}

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
	return b.db.Write(b.b, nil)
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.size = 0
}

// Replay replays the batch contents.
func (b *batch) Replay(w ethdb.KeyValueWriter) error {
	return b.b.Replay(&replayer{writer: w})
}

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
	writer  ethdb.KeyValueWriter
	failure error
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}
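
// Illustrative sketch (not part of the original file): buffering writes in a
// batch and flushing whenever a size threshold is crossed. The threshold
// constant is local to this sketch; it mirrors the 100KiB IdealBatchSize that
// upstream go-ethereum's ethdb package uses for the same purpose.
const sketchIdealBatchSize = 100 * 1024

func exampleBatchedWrites(db *Database, items map[string][]byte) error {
	b := db.NewBatch()
	for k, v := range items {
		if err := b.Put([]byte(k), v); err != nil {
			return err
		}
		// ValueSize only counts queued value bytes, so flush and reset once
		// enough data has accumulated to make the disk write worthwhile.
		if b.ValueSize() >= sketchIdealBatchSize {
			if err := b.Write(); err != nil {
				return err
			}
			b.Reset()
		}
	}
	return b.Write() // commit the remainder
}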