github.com/cranelv/ethereum_mpc@v0.0.0-20191031014521-23aeb1415092/ethdb/database.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethdb

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	writeDelayNThreshold       = 200
	writeDelayThreshold        = 350 * time.Millisecond
	writeDelayWarningThrottler = 1 * time.Minute
)

var OpenFileLimit = 64

type LDBDatabase struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// NewLDBDatabase returns a LevelDB wrapped object.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
	logger := log.New("database", file)

	// Ensure we have some minimal caching and file guarantees
	if cache < 16 {
		cache = 16
	}
	if handles < 16 {
		handles = 16
	}
	logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &LDBDatabase{
		fn:  file,
		db:  db,
		log: logger,
	}, nil
}
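// exampleOpenAndPut is a usage sketch, not part of the original file: it shows
// the minimal open/put/get/close flow against NewLDBDatabase. The path, cache
// and handle sizes, and the keys are illustrative assumptions, not values
// mandated by the package.
func exampleOpenAndPut() error {
	db, err := NewLDBDatabase("/tmp/ethdb-example", 16, 16) // hypothetical scratch path
	if err != nil {
		return err
	}
	defer db.Close() // also stops the metrics collector, if one was started

	if err := db.Put([]byte("answer"), []byte("42")); err != nil {
		return err
	}
	val, err := db.Get([]byte("answer"))
	if err != nil {
		return err
	}
	fmt.Println(string(val)) // prints "42"
	return nil
}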
// Path returns the path to the database directory.
func (db *LDBDatabase) Path() string {
	return db.fn
}

// Put inserts the given key / value pair into the database.
func (db *LDBDatabase) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Has reports whether the given key is present in the database.
func (db *LDBDatabase) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get returns the given key if it's present.
func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Delete removes the given key from the database.
func (db *LDBDatabase) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

// NewIterator returns an iterator over the entire keyspace of the database.
func (db *LDBDatabase) NewIterator() iterator.Iterator {
	return db.db.NewIterator(nil, nil)
}

// NewIteratorWithPrefix returns an iterator over the subset of database content
// with a particular key prefix.
func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}

// Close stops the metrics collection goroutine and closes the underlying
// LevelDB instance.
func (db *LDBDatabase) Close() {
	// Stop the metrics collection to avoid internal database races
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
	}
	err := db.db.Close()
	if err == nil {
		db.log.Info("Database closed")
	} else {
		db.log.Error("Failed to close database", "err", err)
	}
}

// LDB exposes the underlying LevelDB instance.
func (db *LDBDatabase) LDB() *leveldb.DB {
	return db.db
}

// Meter configures the database metrics collectors and starts the periodic
// stats collection goroutine.
func (db *LDBDatabase) Meter(prefix string) {
	if metrics.Enabled {
		// Initialize all the metrics collectors at the requested prefix
		db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
		db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
		db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
		db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
		db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
	}
	// Initialize the write delay metrics regardless of whether metrics are enabled or not.
	db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
	db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)

	// Create a quit channel for the periodic collector and run it
	db.quitLock.Lock()
	db.quitChan = make(chan chan error)
	db.quitLock.Unlock()

	go db.meter(3 * time.Second)
}
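// exampleMeter is a usage sketch, not part of the original file. The
// "eth/db/chaindata/" prefix is assumed here for illustration; any prefix
// works, and the registered meter names are derived from it (for example
// "eth/db/chaindata/compact/time").
func exampleMeter(db *LDBDatabase) {
	// Registers the compaction/disk meters when metrics are enabled, always
	// registers the write-delay meters, and starts the 3-second collector.
	db.Meter("eth/db/chaindata/")
}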
// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the write delay looks like (currently):
// DelayN:5 Delay:406.604657ms
//
// This is what the iostats look like (currently):
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *LDBDatabase) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 3)
	}
	// Create storage for iostats.
	var iostats [2]float64

	// Create storage and warning log tracer for write delay.
	var (
		delaystats      [2]int64
		lastWriteDelay  time.Time
		lastWriteDelayN time.Time
		lastWritePaused time.Time
	)

	// Iterate ad infinitum and collect the stats
	for i := 1; ; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			return
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			return
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					return
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}

		// Retrieve the write delay statistic
		writedelay, err := db.db.GetProperty("leveldb.writedelay")
		if err != nil {
			db.log.Error("Failed to read database write delay statistic", "err", err)
			return
		}
		var (
			delayN        int64
			delayDuration string
			duration      time.Duration
			paused        bool
		)
		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
			db.log.Error("Write delay statistic not found")
			return
		}
		duration, err = time.ParseDuration(delayDuration)
		if err != nil {
			db.log.Error("Failed to parse delay duration", "err", err)
			return
		}
		if db.writeDelayNMeter != nil {
			db.writeDelayNMeter.Mark(delayN - delaystats[0])
			// If the write delay count collected in the last minute exceeds the
			// predefined threshold, print a warning log here.
			// If a warning that db performance is laggy has been displayed, any
			// subsequent warnings will be withheld for 1 minute so as not to
			// overwhelm the user.
			if int(db.writeDelayNMeter.Rate1()) > writeDelayNThreshold &&
				time.Now().After(lastWriteDelayN.Add(writeDelayWarningThrottler)) {
				db.log.Warn("Write delay number exceeds the threshold (200 per second) in the last minute")
				lastWriteDelayN = time.Now()
			}
		}
		if db.writeDelayMeter != nil {
			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
			// If the write delay duration collected in the last minute exceeds
			// the predefined threshold, print a warning log here.
			// If a warning that db performance is laggy has been displayed, any
			// subsequent warnings will be withheld for 1 minute so as not to
			// overwhelm the user.
			if int64(db.writeDelayMeter.Rate1()) > writeDelayThreshold.Nanoseconds() &&
				time.Now().After(lastWriteDelay.Add(writeDelayWarningThrottler)) {
				db.log.Warn("Write delay duration exceeds the threshold (35% of the time) in the last minute")
				lastWriteDelay = time.Now()
			}
		}
		// If a warning that db is performing compaction has been displayed, any
		// subsequent warnings will be withheld for one minute so as not to
		// overwhelm the user.
		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
			time.Now().After(lastWritePaused.Add(writeDelayWarningThrottler)) {
			db.log.Warn("Database compacting, degraded performance")
			lastWritePaused = time.Now()
		}

		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			return
		}
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			return
		}
		r := strings.Split(parts[0], ":")
		if len(r) < 2 {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			return
		}
		read, err := strconv.ParseFloat(r[1], 64)
		if err != nil {
			db.log.Error("Read entry parsing failed", "err", err)
			return
		}
		w := strings.Split(parts[1], ":")
		if len(w) < 2 {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			return
		}
		write, err := strconv.ParseFloat(w[1], 64)
		if err != nil {
			db.log.Error("Write entry parsing failed", "err", err)
			return
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((read - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((write - iostats[1]) * 1024 * 1024))
		}
		iostats[0] = read
		iostats[1] = write

		// Sleep a bit, then repeat the stats collection
		select {
		case errc := <-db.quitChan:
			// Quit requested; stop hammering the database
			errc <- nil
			return

		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}
}
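// parseWriteDelay is a hedged sketch, not part of the original file: it
// isolates the "leveldb.writedelay" parsing step from meter() above, with a
// hard-coded sample input assumed to match the format shown in meter's doc
// comment.
func parseWriteDelay() (delayN int64, duration time.Duration, paused bool, err error) {
	writedelay := "DelayN:5 Delay:406.604657ms Paused:false" // assumed sample value

	var delayDuration string
	if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
		return 0, 0, false, fmt.Errorf("write delay statistic not found: %v", err)
	}
	// The delay arrives as a token like "406.604657ms"; parse it into a Duration.
	duration, err = time.ParseDuration(delayDuration)
	if err != nil {
		return 0, 0, false, err
	}
	return delayN, duration, paused, nil
}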
// NewBatch creates a write-only batch backed by the database.
func (db *LDBDatabase) NewBatch() Batch {
	return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}

type ldbBatch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

func (b *ldbBatch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

func (b *ldbBatch) Write() error {
	return b.db.Write(b.b, nil)
}

func (b *ldbBatch) ValueSize() int {
	return b.size
}

func (b *ldbBatch) Reset() {
	b.b.Reset()
	b.size = 0
}

type table struct {
	db     Database
	prefix string
}

// NewTable returns a Database object that prefixes all keys with a given
// string.
func NewTable(db Database, prefix string) Database {
	return &table{
		db:     db,
		prefix: prefix,
	}
}

func (dt *table) Put(key []byte, value []byte) error {
	return dt.db.Put(append([]byte(dt.prefix), key...), value)
}

func (dt *table) Has(key []byte) (bool, error) {
	return dt.db.Has(append([]byte(dt.prefix), key...))
}

func (dt *table) Get(key []byte) ([]byte, error) {
	return dt.db.Get(append([]byte(dt.prefix), key...))
}

func (dt *table) Delete(key []byte) error {
	return dt.db.Delete(append([]byte(dt.prefix), key...))
}

func (dt *table) Close() {
	// Do nothing; don't close the underlying DB.
}
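// exampleBatchAndTable is a usage sketch, not part of the original file: it
// shows batching and key prefixing together. It assumes the package's Database
// interface exposes NewBatch, as the methods above suggest; the "headers-"
// prefix and the keys are illustrative.
func exampleBatchAndTable(db Database) error {
	// Buffer several writes in memory, then flush them in one leveldb write.
	batch := db.NewBatch()
	if err := batch.Put([]byte("k1"), []byte("v1")); err != nil {
		return err
	}
	if err := batch.Put([]byte("k2"), []byte("v2")); err != nil {
		return err
	}
	if err := batch.Write(); err != nil {
		return err
	}

	// The table view stores "headers-k3" on disk but is addressed via "k3".
	headers := NewTable(db, "headers-")
	return headers.Put([]byte("k3"), []byte("v3"))
}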
type tableBatch struct {
	batch  Batch
	prefix string
}

// NewTableBatch returns a Batch object which prefixes all keys with a given string.
func NewTableBatch(db Database, prefix string) Batch {
	return &tableBatch{db.NewBatch(), prefix}
}

func (dt *table) NewBatch() Batch {
	return &tableBatch{dt.db.NewBatch(), dt.prefix}
}

func (tb *tableBatch) Put(key, value []byte) error {
	return tb.batch.Put(append([]byte(tb.prefix), key...), value)
}

func (tb *tableBatch) Write() error {
	return tb.batch.Write()
}

func (tb *tableBatch) ValueSize() int {
	return tb.batch.ValueSize()
}

func (tb *tableBatch) Reset() {
	tb.batch.Reset()
}
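// exampleTableBatch is a usage sketch, not part of the original file: keys
// gain the "receipts-" prefix (an illustrative assumption) when buffered,
// while ValueSize still reports only the raw value bytes buffered so far.
func exampleTableBatch(db Database) error {
	tb := NewTableBatch(db, "receipts-")
	if err := tb.Put([]byte("k"), []byte("v")); err != nil { // stored as "receipts-k"
		return err
	}
	if tb.ValueSize() > 0 {
		return tb.Write()
	}
	return nil
}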