github.com/vntchain/go-vnt@v0.6.4-alpha.6/vntdb/database.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package vntdb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
	"github.com/vntchain/go-vnt/log"
	"github.com/vntchain/go-vnt/metrics"
)

const (
	writeDelayNThreshold       = 200
	writeDelayThreshold        = 350 * time.Millisecond
	writeDelayWarningThrottler = 1 * time.Minute
)

var OpenFileLimit = 64

type LDBDatabase struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// NewLDBDatabase returns a LevelDB wrapped object.
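// The cache budget (in MiB) is split between LevelDB's block cache (cache/2)
// and its write buffers (cache/4 each, two of which are used internally),
// while handles bounds the open-file cache; both are clamped to a minimum of 16.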
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
	logger := log.New("database", file)

	// Ensure we have some minimal caching and file guarantees
	if cache < 16 {
		cache = 16
	}
	if handles < 16 {
		handles = 16
	}
	logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &LDBDatabase{
		fn:  file,
		db:  db,
		log: logger,
	}, nil
}

// Path returns the path to the database directory.
func (db *LDBDatabase) Path() string {
	return db.fn
}

// Put inserts the given key/value pair into the database.
func (db *LDBDatabase) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Has reports whether the given key is present in the database.
func (db *LDBDatabase) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get returns the value for the given key if it's present.
func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Delete removes the given key from the database.
func (db *LDBDatabase) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

// NewIterator returns an iterator over the entire database content.
func (db *LDBDatabase) NewIterator() iterator.Iterator {
	return db.db.NewIterator(nil, nil)
}

// NewIteratorWithPrefix returns an iterator over the subset of database content
// with a particular key prefix.
func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}

// Close stops the metrics collection goroutine and closes the underlying
// LevelDB instance.
func (db *LDBDatabase) Close() {
	// Stop the metrics collection to avoid internal database races
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	err := db.db.Close()
	if err == nil {
		db.log.Info("Database closed")
	} else {
		db.log.Error("Failed to close database", "err", err)
	}
}

// LDB exposes the underlying *leveldb.DB handle.
func (db *LDBDatabase) LDB() *leveldb.DB {
	return db.db
}

// Meter configures the database metrics collectors under the given prefix and
// starts the periodic collection goroutine.
func (db *LDBDatabase) Meter(prefix string) {
	if metrics.Enabled {
		// Initialize all the metrics collectors at the requested prefix
		db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
		db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
		db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
		db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
		db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
	}
	// Initialize the write delay metrics whether metrics export is enabled or not.
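	// The collector goroutine uses their one-minute rates to decide when to
	// emit the throttled slow-database warnings, so they must exist even when
	// metrics export is disabled.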
	db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
	db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)

	// Create a quit channel for the periodic collector and run it
	db.quitLock.Lock()
	db.quitChan = make(chan chan error)
	db.quitLock.Unlock()

	go db.meter(3 * time.Second)
}

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
//   DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats look like (currently):
//   Read(MB):3895.04860 Write(MB):3654.64712
func (db *LDBDatabase) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 3)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWriteDelay  time.Time
		lastWriteDelayN time.Time
		lastWritePaused time.Time
	)

	var (
		errc chan error
		merr error
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; errc == nil && merr == nil; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			merr = err
			continue
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			merr = errors.New("compaction table not found")
			continue
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					merr = err
					continue
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}
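
		// leveldb.writedelay, like the compaction table above, reports cumulative
		// totals; only the delta against the previous sample held in delaystats
		// is marked and compared against the thresholds below.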
		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			merr = err
			continue
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			db.log.Error("Write delay statistic not found")
			// Wrap the error so merr is non-nil even if Sscanf stopped early
			// without reporting one; otherwise the loop would spin without sleeping.
			merr = fmt.Errorf("write delay statistic not found: %v", err)
			continue
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			merr = err
			continue
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
			// If the number of write delays collected in the last minute exceeds
			// the predefined threshold, print a warning log here.
			// Once a warning that db performance is laggy has been displayed, any
			// subsequent warnings are withheld for one minute so as not to
			// overwhelm the user.
			if int(db.writeDelayNMeter.Rate1()) > writeDelayNThreshold &&
				time.Now().After(lastWriteDelayN.Add(writeDelayWarningThrottler)) {
				db.log.Warn("Write delay number exceeds the threshold (200 per second) in the last minute")
				lastWriteDelayN = time.Now()
			}
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
			// If the write delay duration collected in the last minute exceeds
			// the predefined threshold, print a warning log here.
			// Once a warning that db performance is laggy has been displayed, any
			// subsequent warnings are withheld for one minute so as not to
			// overwhelm the user.
			if int64(db.writeDelayMeter.Rate1()) > writeDelayThreshold.Nanoseconds() &&
				time.Now().After(lastWriteDelay.Add(writeDelayWarningThrottler)) {
				db.log.Warn("Write delay duration exceeds the threshold (35% of the time) in the last minute")
				lastWriteDelay = time.Now()
			}
		}
		// If a warning that db is performing compaction has been displayed, any subsequent
		// warnings will be withheld for one minute not to overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(writeDelayWarningThrottler)) {
			db.log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}

		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
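		// The property is a single line of the form "Read(MB):<n> Write(MB):<n>";
		// the cumulative megabyte figures are converted to byte deltas below.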
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			merr = err
			continue
		}
		var nRead, nWrite float64
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
			continue
		}
		// As above, wrap so merr is non-nil even if Sscanf reported no error.
		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			merr = fmt.Errorf("bad syntax of read entry %q: %v", parts[0], err)
			continue
		}
		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			merr = fmt.Errorf("bad syntax of write entry %q: %v", parts[1], err)
			continue
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
		}
		iostats[0], iostats[1] = nRead, nWrite

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requested, stop hammering the database
		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}

// NewBatch returns a write batch backed by the database.
func (db *LDBDatabase) NewBatch() Batch {
	return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}

type ldbBatch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

func (b *ldbBatch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

func (b *ldbBatch) Write() error {
	return b.db.Write(b.b, nil)
}

func (b *ldbBatch) ValueSize() int {
	return b.size
}

func (b *ldbBatch) Reset() {
	b.b.Reset()
	b.size = 0
}

type table struct {
	db     Database
	prefix string
}

// NewTable returns a Database object that prefixes all keys with a given
// string.
func NewTable(db Database, prefix string) Database {
	return &table{
		db:     db,
		prefix: prefix,
	}
}

func (dt *table) Put(key []byte, value []byte) error {
	return dt.db.Put(append([]byte(dt.prefix), key...), value)
}

func (dt *table) Has(key []byte) (bool, error) {
	return dt.db.Has(append([]byte(dt.prefix), key...))
}

func (dt *table) Get(key []byte) ([]byte, error) {
	return dt.db.Get(append([]byte(dt.prefix), key...))
}

func (dt *table) Delete(key []byte) error {
	return dt.db.Delete(append([]byte(dt.prefix), key...))
}

func (dt *table) Close() {
	// Do nothing; don't close the underlying DB.
}

type tableBatch struct {
	batch  Batch
	prefix string
}

// NewTableBatch returns a Batch object which prefixes all keys with a given string.
func NewTableBatch(db Database, prefix string) Batch {
	return &tableBatch{db.NewBatch(), prefix}
}

func (dt *table) NewBatch() Batch {
	return &tableBatch{dt.db.NewBatch(), dt.prefix}
}

func (tb *tableBatch) Put(key, value []byte) error {
	return tb.batch.Put(append([]byte(tb.prefix), key...), value)
}

func (tb *tableBatch) Write() error {
	return tb.batch.Write()
}

func (tb *tableBatch) ValueSize() int {
	return tb.batch.ValueSize()
}

func (tb *tableBatch) Reset() {
	tb.batch.Reset()
}
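
// Illustrative usage sketch (not part of the original file): how the pieces
// above combine in practice. The path "/tmp/vnt-example", the cache/handle
// sizes and the "header-" key prefix are hypothetical values chosen for the
// example.
//
//	db, err := NewLDBDatabase("/tmp/vnt-example", 16, 16) // hypothetical path
//	if err != nil {
//		panic(err)
//	}
//	defer db.Close()
//
//	// Plain key/value access on the raw database.
//	_ = db.Put([]byte("key"), []byte("value"))
//
//	// Namespaced view: every key is transparently prefixed with "header-".
//	headers := NewTable(db, "header-")
//	_ = headers.Put([]byte("1"), []byte("header bytes"))
//
//	// Batched writes accumulate in memory until Write flushes them.
//	batch := headers.NewBatch()
//	_ = batch.Put([]byte("2"), []byte("more header bytes"))
//	_ = batch.Write()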