// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package shed provides simple abstraction components to compose
// more complex operations on storage data organized in fields and indexes.
//
// The only type which holds logical information about swarm storage chunks data
// and metadata is Item. This part is not generalized mostly for
// performance reasons.
package shed

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

const (
	// openFileLimit is the limit for LevelDB OpenFilesCacheCapacity.
	openFileLimit = 128
	// writePauseWarningThrottler is the minimum interval between repeated
	// "database compacting" warnings logged by the metrics collector.
	writePauseWarningThrottler = 1 * time.Minute
)

// DB provides abstractions over LevelDB in order to
// implement complex structures using fields and ordered indexes.
// It provides a schema functionality to store fields and indexes
// information about naming and types.
47 type DB struct { 48 ldb *leveldb.DB 49 50 compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction 51 compReadMeter metrics.Meter // Meter for measuring the data read during compaction 52 compWriteMeter metrics.Meter // Meter for measuring the data written during compaction 53 writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction 54 writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction 55 diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read 56 diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written 57 58 quit chan struct{} // Quit channel to stop the metrics collection before closing the database 59 } 60 61 // NewDB constructs a new DB and validates the schema 62 // if it exists in database on the given path. 63 // metricsPrefix is used for metrics collection for the given DB. 64 func NewDB(path string, metricsPrefix string) (db *DB, err error) { 65 ldb, err := leveldb.OpenFile(path, &opt.Options{ 66 OpenFilesCacheCapacity: openFileLimit, 67 }) 68 if err != nil { 69 return nil, err 70 } 71 db = &DB{ 72 ldb: ldb, 73 } 74 75 if _, err = db.getSchema(); err != nil { 76 if err == leveldb.ErrNotFound { 77 // save schema with initialized default fields 78 if err = db.putSchema(schema{ 79 Fields: make(map[string]fieldSpec), 80 Indexes: make(map[byte]indexSpec), 81 }); err != nil { 82 return nil, err 83 } 84 } else { 85 return nil, err 86 } 87 } 88 89 // Configure meters for DB 90 db.configure(metricsPrefix) 91 92 // Create a quit channel for the periodic metrics collector and run it 93 db.quit = make(chan struct{}) 94 95 go db.meter(10 * time.Second) 96 97 return db, nil 98 } 99 100 // Put wraps LevelDB Put method to increment metrics counter. 
101 func (db *DB) Put(key []byte, value []byte) (err error) { 102 err = db.ldb.Put(key, value, nil) 103 if err != nil { 104 metrics.GetOrRegisterCounter("DB.putFail", nil).Inc(1) 105 return err 106 } 107 metrics.GetOrRegisterCounter("DB.put", nil).Inc(1) 108 return nil 109 } 110 111 // Get wraps LevelDB Get method to increment metrics counter. 112 func (db *DB) Get(key []byte) (value []byte, err error) { 113 value, err = db.ldb.Get(key, nil) 114 if err != nil { 115 if err == leveldb.ErrNotFound { 116 metrics.GetOrRegisterCounter("DB.getNotFound", nil).Inc(1) 117 } else { 118 metrics.GetOrRegisterCounter("DB.getFail", nil).Inc(1) 119 } 120 return nil, err 121 } 122 metrics.GetOrRegisterCounter("DB.get", nil).Inc(1) 123 return value, nil 124 } 125 126 // Delete wraps LevelDB Delete method to increment metrics counter. 127 func (db *DB) Delete(key []byte) (err error) { 128 err = db.ldb.Delete(key, nil) 129 if err != nil { 130 metrics.GetOrRegisterCounter("DB.deleteFail", nil).Inc(1) 131 return err 132 } 133 metrics.GetOrRegisterCounter("DB.delete", nil).Inc(1) 134 return nil 135 } 136 137 // NewIterator wraps LevelDB NewIterator method to increment metrics counter. 138 func (db *DB) NewIterator() iterator.Iterator { 139 metrics.GetOrRegisterCounter("DB.newiterator", nil).Inc(1) 140 141 return db.ldb.NewIterator(nil, nil) 142 } 143 144 // WriteBatch wraps LevelDB Write method to increment metrics counter. 145 func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) { 146 err = db.ldb.Write(batch, nil) 147 if err != nil { 148 metrics.GetOrRegisterCounter("DB.writebatchFail", nil).Inc(1) 149 return err 150 } 151 metrics.GetOrRegisterCounter("DB.writebatch", nil).Inc(1) 152 return nil 153 } 154 155 // Close closes LevelDB database. 
156 func (db *DB) Close() (err error) { 157 close(db.quit) 158 return db.ldb.Close() 159 } 160 161 // Configure configures the database metrics collectors 162 func (db *DB) configure(prefix string) { 163 // Initialize all the metrics collector at the requested prefix 164 db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil) 165 db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil) 166 db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil) 167 db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil) 168 db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil) 169 db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil) 170 db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil) 171 } 172 173 func (db *DB) meter(refresh time.Duration) { 174 // Create the counters to store current and previous compaction values 175 compactions := make([][]float64, 2) 176 for i := 0; i < 2; i++ { 177 compactions[i] = make([]float64, 3) 178 } 179 // Create storage for iostats. 180 var iostats [2]float64 181 182 // Create storage and warning log tracer for write delay. 
183 var ( 184 delaystats [2]int64 185 lastWritePaused time.Time 186 ) 187 188 // Iterate ad infinitum and collect the stats 189 for i := 1; true; i++ { 190 // Retrieve the database stats 191 stats, err := db.ldb.GetProperty("leveldb.stats") 192 if err != nil { 193 log.Error("Failed to read database stats", "err", err) 194 continue 195 } 196 // Find the compaction table, skip the header 197 lines := strings.Split(stats, "\n") 198 for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" { 199 lines = lines[1:] 200 } 201 if len(lines) <= 3 { 202 log.Error("Compaction table not found") 203 continue 204 } 205 lines = lines[3:] 206 207 // Iterate over all the table rows, and accumulate the entries 208 for j := 0; j < len(compactions[i%2]); j++ { 209 compactions[i%2][j] = 0 210 } 211 for _, line := range lines { 212 parts := strings.Split(line, "|") 213 if len(parts) != 6 { 214 break 215 } 216 for idx, counter := range parts[3:] { 217 value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64) 218 if err != nil { 219 log.Error("Compaction entry parsing failed", "err", err) 220 continue 221 } 222 compactions[i%2][idx] += value 223 } 224 } 225 // Update all the requested meters 226 if db.compTimeMeter != nil { 227 db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000)) 228 } 229 if db.compReadMeter != nil { 230 db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024)) 231 } 232 if db.compWriteMeter != nil { 233 db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024)) 234 } 235 236 // Retrieve the write delay statistic 237 writedelay, err := db.ldb.GetProperty("leveldb.writedelay") 238 if err != nil { 239 log.Error("Failed to read database write delay statistic", "err", err) 240 continue 241 } 242 var ( 243 delayN int64 244 delayDuration string 245 duration time.Duration 246 paused bool 247 ) 248 if n, err := fmt.Sscanf(writedelay, "DelayN:%d 
Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil { 249 log.Error("Write delay statistic not found") 250 continue 251 } 252 duration, err = time.ParseDuration(delayDuration) 253 if err != nil { 254 log.Error("Failed to parse delay duration", "err", err) 255 continue 256 } 257 if db.writeDelayNMeter != nil { 258 db.writeDelayNMeter.Mark(delayN - delaystats[0]) 259 } 260 if db.writeDelayMeter != nil { 261 db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1]) 262 } 263 // If a warning that db is performing compaction has been displayed, any subsequent 264 // warnings will be withheld for one minute not to overwhelm the user. 265 if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 && 266 time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) { 267 log.Warn("Database compacting, degraded performance") 268 lastWritePaused = time.Now() 269 } 270 delaystats[0], delaystats[1] = delayN, duration.Nanoseconds() 271 272 // Retrieve the database iostats. 
273 ioStats, err := db.ldb.GetProperty("leveldb.iostats") 274 if err != nil { 275 log.Error("Failed to read database iostats", "err", err) 276 continue 277 } 278 var nRead, nWrite float64 279 parts := strings.Split(ioStats, " ") 280 if len(parts) < 2 { 281 log.Error("Bad syntax of ioStats", "ioStats", ioStats) 282 continue 283 } 284 if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil { 285 log.Error("Bad syntax of read entry", "entry", parts[0]) 286 continue 287 } 288 if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil { 289 log.Error("Bad syntax of write entry", "entry", parts[1]) 290 continue 291 } 292 if db.diskReadMeter != nil { 293 db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024)) 294 } 295 if db.diskWriteMeter != nil { 296 db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024)) 297 } 298 iostats[0], iostats[1] = nRead, nWrite 299 300 // Sleep a bit, then repeat the stats collection 301 select { 302 case <-db.quit: 303 // Quit requesting, stop hammering the database 304 return 305 case <-time.After(refresh): 306 // Timeout, gather a new set of stats 307 } 308 } 309 }