github.com/anthdm/go-ethereum@v1.8.4-0.20180412101906-60516c83b011/ethdb/database.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethdb

import (
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

// OpenFileLimit is the default cap on the number of open database files.
var OpenFileLimit = 64

// LDBDatabase is a LevelDB-backed key-value store.
type LDBDatabase struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	compTimeMeter  metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter  metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
	diskReadMeter  metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	log log.Logger // Contextual logger tracking the database path
}

// NewLDBDatabase returns a wrapped LevelDB object.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
	logger := log.New("database", file)

	// Ensure we have some minimal caching and file guarantees
	if cache < 16 {
		cache = 16
	}
	if handles < 16 {
		handles = 16
	}
	logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &LDBDatabase{
		fn:  file,
		db:  db,
		log: logger,
	}, nil
}
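// exampleOpenAndGet is a usage sketch, not part of the original file: the
// path "/tmp/ethdb-example" is a hypothetical throwaway location and the
// cache/handle sizes are the minimums enforced above. It shows the expected
// open -> Put -> Get -> Close round trip.
func exampleOpenAndGet() ([]byte, error) {
	db, err := NewLDBDatabase("/tmp/ethdb-example", 16, 16) // hypothetical path
	if err != nil {
		return nil, err
	}
	defer db.Close()

	// Corruption recovery and error checking already happened inside
	// NewLDBDatabase, so the handle is safe to use directly.
	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		return nil, err
	}
	return db.Get([]byte("key"))
}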
// Path returns the path to the database directory.
func (db *LDBDatabase) Path() string {
	return db.fn
}

// Put inserts the given key / value pair into the database.
func (db *LDBDatabase) Put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

// Has reports whether the given key is present in the database.
func (db *LDBDatabase) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get returns the value of the given key if it's present.
func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

// Delete removes the key from the database.
func (db *LDBDatabase) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

// NewIterator returns an iterator over the entire database content.
func (db *LDBDatabase) NewIterator() iterator.Iterator {
	return db.db.NewIterator(nil, nil)
}

// NewIteratorWithPrefix returns an iterator over the subset of database
// content with a particular key prefix.
func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}

// Close stops the metrics collection and closes the underlying LevelDB
// instance.
func (db *LDBDatabase) Close() {
	// Stop the metrics collection to avoid internal database races
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.log.Error("Metrics collection failed", "err", err)
		}
	}
	err := db.db.Close()
	if err == nil {
		db.log.Info("Database closed")
	} else {
		db.log.Error("Failed to close database", "err", err)
	}
}

// LDB exposes the underlying LevelDB instance.
func (db *LDBDatabase) LDB() *leveldb.DB {
	return db.db
}

// Meter configures the database metrics collectors and starts the periodic
// collection goroutine.
func (db *LDBDatabase) Meter(prefix string) {
	// Short circuit metering if the metrics system is disabled
	if !metrics.Enabled {
		return
	}
	// Initialize all the metrics collectors at the requested prefix
	db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
	db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
	db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
	db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
	db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)

	// Create a quit channel for the periodic collector and run it
	db.quitLock.Lock()
	db.quitChan = make(chan chan error)
	db.quitLock.Unlock()

	go db.meter(3 * time.Second)
}
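// exampleMetering is a hedged sketch, not part of the original file. The
// prefix "eth/db/chaindata/" is an illustrative assumption; Meter registers
// the five meters under it and spawns the 3-second collector, and the raw
// property that collector parses can also be read directly for inspection.
func exampleMetering(db *LDBDatabase) (string, error) {
	db.Meter("eth/db/chaindata/") // no-op unless metrics.Enabled is set

	// The same counters the collector parses, in their raw textual form.
	return db.LDB().GetProperty("leveldb.stats")
}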
// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a stats table looks like (currently):
//
//	Compactions
//	 Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//	-------+------------+---------------+---------------+---------------+---------------
//	   0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//	   1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//	   2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//	   3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is what the iostats look like (currently):
//
//	Read(MB):3895.04860 Write(MB):3654.64712
func (db *LDBDatabase) meter(refresh time.Duration) {
	// Create the counters to store current and previous compaction values
	compactions := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		compactions[i] = make([]float64, 3)
	}
	// Create storage for iostats.
	var iostats [2]float64
	// Iterate ad infinitum and collect the stats
	for i := 1; ; i++ {
		// Retrieve the database stats
		stats, err := db.db.GetProperty("leveldb.stats")
		if err != nil {
			db.log.Error("Failed to read database stats", "err", err)
			return
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			db.log.Error("Compaction table not found")
			return
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(compactions[i%2]); j++ {
			compactions[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
				if err != nil {
					db.log.Error("Compaction entry parsing failed", "err", err)
					return
				}
				compactions[i%2][idx] += value
			}
		}
		// Update all the requested meters
		if db.compTimeMeter != nil {
			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if db.compReadMeter != nil {
			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
		}
		if db.compWriteMeter != nil {
			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
		}

		// Retrieve the database iostats.
		ioStats, err := db.db.GetProperty("leveldb.iostats")
		if err != nil {
			db.log.Error("Failed to read database iostats", "err", err)
			return
		}
		parts := strings.Split(ioStats, " ")
		if len(parts) < 2 {
			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
			return
		}
		r := strings.Split(parts[0], ":")
		if len(r) < 2 {
			db.log.Error("Bad syntax of read entry", "entry", parts[0])
			return
		}
		read, err := strconv.ParseFloat(r[1], 64)
		if err != nil {
			db.log.Error("Read entry parsing failed", "err", err)
			return
		}
		w := strings.Split(parts[1], ":")
		if len(w) < 2 {
			db.log.Error("Bad syntax of write entry", "entry", parts[1])
			return
		}
		write, err := strconv.ParseFloat(w[1], 64)
		if err != nil {
			db.log.Error("Write entry parsing failed", "err", err)
			return
		}
		if db.diskReadMeter != nil {
			db.diskReadMeter.Mark(int64((read - iostats[0]) * 1024 * 1024))
		}
		if db.diskWriteMeter != nil {
			db.diskWriteMeter.Mark(int64((write - iostats[1]) * 1024 * 1024))
		}
		iostats[0] = read
		iostats[1] = write

		// Sleep a bit, then repeat the stats collection
		select {
		case errc := <-db.quitChan:
			// Quit requested, stop hammering the database
			errc <- nil
			return

		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}
}

// NewBatch returns a write batch that buffers mutations in memory until Write
// is called.
func (db *LDBDatabase) NewBatch() Batch {
	return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}

type ldbBatch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	size int
}

// Put queues a key / value pair in the batch and tracks the accumulated
// value size.
func (b *ldbBatch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Write flushes the batched mutations to the underlying database.
func (b *ldbBatch) Write() error {
	return b.db.Write(b.b, nil)
}

// ValueSize returns the total size of the values queued so far.
func (b *ldbBatch) ValueSize() int {
	return b.size
}
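// exampleBatch is a usage sketch, not part of the original file: it assumes
// the package's Batch interface exposes Put, ValueSize, Write and Reset, as
// the implementations in this file do, and flushes every 100 KiB of queued
// values, an arbitrary illustrative threshold.
func exampleBatch(db *LDBDatabase, items map[string][]byte) error {
	batch := db.NewBatch()
	for k, v := range items {
		if err := batch.Put([]byte(k), v); err != nil {
			return err
		}
		// ValueSize only counts value bytes, so this bounds memory roughly.
		if batch.ValueSize() >= 100*1024 {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	return batch.Write() // flush the remainder
}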
// Reset clears the batch for reuse.
func (b *ldbBatch) Reset() {
	b.b.Reset()
	b.size = 0
}

// table wraps a Database and transparently prefixes all keys.
type table struct {
	db     Database
	prefix string
}

// NewTable returns a Database object that prefixes all keys with a given
// string.
func NewTable(db Database, prefix string) Database {
	return &table{
		db:     db,
		prefix: prefix,
	}
}

func (dt *table) Put(key []byte, value []byte) error {
	return dt.db.Put(append([]byte(dt.prefix), key...), value)
}

func (dt *table) Has(key []byte) (bool, error) {
	return dt.db.Has(append([]byte(dt.prefix), key...))
}

func (dt *table) Get(key []byte) ([]byte, error) {
	return dt.db.Get(append([]byte(dt.prefix), key...))
}

func (dt *table) Delete(key []byte) error {
	return dt.db.Delete(append([]byte(dt.prefix), key...))
}

func (dt *table) Close() {
	// Do nothing; don't close the underlying DB.
}

// tableBatch wraps a Batch and prefixes all keys with the table's prefix.
type tableBatch struct {
	batch  Batch
	prefix string
}

// NewTableBatch returns a Batch object which prefixes all keys with a given string.
func NewTableBatch(db Database, prefix string) Batch {
	return &tableBatch{db.NewBatch(), prefix}
}

func (dt *table) NewBatch() Batch {
	return &tableBatch{dt.db.NewBatch(), dt.prefix}
}

func (tb *tableBatch) Put(key, value []byte) error {
	return tb.batch.Put(append([]byte(tb.prefix), key...), value)
}

func (tb *tableBatch) Write() error {
	return tb.batch.Write()
}

func (tb *tableBatch) ValueSize() int {
	return tb.batch.ValueSize()
}

func (tb *tableBatch) Reset() {
	tb.batch.Reset()
}
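// exampleTable is a usage sketch, not part of the original file: the
// "headers-" and "bodies-" prefixes are arbitrary examples showing how two
// tables can share one backing database without key collisions.
func exampleTable(db Database) error {
	headers := NewTable(db, "headers-")
	bodies := NewTable(db, "bodies-")

	// Stored under the raw keys "headers-genesis" and "bodies-genesis".
	if err := headers.Put([]byte("genesis"), []byte{0x01}); err != nil {
		return err
	}
	return bodies.Put([]byte("genesis"), []byte{0x02})
}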