github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/ethdb/database.go

//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 19:16:38</date>
//</624450090324463616>

// +build !js

package ethdb

import (
    "fmt"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/errors"
    "github.com/syndtr/goleveldb/leveldb/filter"
    "github.com/syndtr/goleveldb/leveldb/iterator"
    "github.com/syndtr/goleveldb/leveldb/opt"
    "github.com/syndtr/goleveldb/leveldb/util"
)

const (
    writePauseWarningThrottler = 1 * time.Minute
)

var OpenFileLimit = 64

type LDBDatabase struct {
    fn string      // filename for reporting
    db *leveldb.DB // LevelDB instance

    compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
    compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
    compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
    writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
    writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
    diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
    diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

    quitLock sync.Mutex      // Mutex protecting the quit channel access
    quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

    log log.Logger // Contextual logger tracking the database path
}

// NewLDBDatabase returns a LevelDB wrapped object.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
    logger := log.New("database", file)

    // Ensure we have some minimal caching and file guarantees
    if cache < 16 {
        cache = 16
    }
    if handles < 16 {
        handles = 16
    }
    logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)

    // Open the db and recover any potential corruptions
    db, err := leveldb.OpenFile(file, &opt.Options{
        OpenFilesCacheCapacity: handles,
        BlockCacheCapacity:     cache / 2 * opt.MiB,
        WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
        Filter:                 filter.NewBloomFilter(10),
    })
    if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
        db, err = leveldb.RecoverFile(file, nil)
    }
    // (Re)check for errors and abort if opening the database failed
    if err != nil {
        return nil, err
    }
    return &LDBDatabase{
        fn:  file,
        db:  db,
        log: logger,
    }, nil
}

// Path returns the path to the database directory.
func (db *LDBDatabase) Path() string {
    return db.fn
}

// Put puts the given key / value pair into the database.
func (db *LDBDatabase) Put(key []byte, value []byte) error {
    return db.db.Put(key, value, nil)
}

func (db *LDBDatabase) Has(key []byte) (bool, error) {
    return db.db.Has(key, nil)
}

// Get returns the value for the given key if it's present.
func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
    dat, err := db.db.Get(key, nil)
    if err != nil {
        return nil, err
    }
    return dat, nil
}

// Delete removes the key from the database.
func (db *LDBDatabase) Delete(key []byte) error {
    return db.db.Delete(key, nil)
}

func (db *LDBDatabase) NewIterator() iterator.Iterator {
    return db.db.NewIterator(nil, nil)
}

// NewIteratorWithPrefix returns an iterator over the subset of database
// content with a particular key prefix.
func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
    return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}
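
// The following function is an illustrative sketch and is not part of the
// original file: it shows how the constructor and the accessor methods above
// are typically combined (open a database, write and read a key, then walk a
// key prefix). The path "/tmp/example-ldb" and the keys are hypothetical
// placeholders.
func exampleBasicUsage() error {
    db, err := NewLDBDatabase("/tmp/example-ldb", 16, 16) // 16MB cache, 16 file handles (the enforced minimums)
    if err != nil {
        return err
    }
    defer db.Close()

    // Store and retrieve a single key/value pair.
    if err := db.Put([]byte("prefix-key"), []byte("value")); err != nil {
        return err
    }
    if val, err := db.Get([]byte("prefix-key")); err == nil {
        fmt.Printf("got %s\n", val)
    }
    // Walk every key sharing the "prefix-" prefix.
    it := db.NewIteratorWithPrefix([]byte("prefix-"))
    defer it.Release()
    for it.Next() {
        fmt.Printf("%s => %s\n", it.Key(), it.Value())
    }
    return it.Error()
}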
func (db *LDBDatabase) Close() {
    // Stop the metrics collection to avoid internal database races
    db.quitLock.Lock()
    defer db.quitLock.Unlock()

    if db.quitChan != nil {
        errc := make(chan error)
        db.quitChan <- errc
        if err := <-errc; err != nil {
            db.log.Error("Metrics collection failed", "err", err)
        }
        db.quitChan = nil
    }
    err := db.db.Close()
    if err == nil {
        db.log.Info("Database closed")
    } else {
        db.log.Error("Failed to close database", "err", err)
    }
}

func (db *LDBDatabase) LDB() *leveldb.DB {
    return db.db
}

// Meter configures the database metrics collectors and starts the periodic collection.
func (db *LDBDatabase) Meter(prefix string) {
    // Initialize all the metrics collectors at the requested prefix
    db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
    db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
    db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
    db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
    db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
    db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
    db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)

    // Create a quit channel for the periodic collector and run it
    db.quitLock.Lock()
    db.quitChan = make(chan chan error)
    db.quitLock.Unlock()

    go db.meter(3 * time.Second)
}

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is how a stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is how the write delay looks like (currently):
// DelayN:5 Delay:406.604657ms Paused:false
//
// This is how the iostats look like (currently):
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *LDBDatabase) meter(refresh time.Duration) {
    // Create the counters to store current and previous compaction values
    compactions := make([][]float64, 2)
    for i := 0; i < 2; i++ {
        compactions[i] = make([]float64, 3)
    }
    // Create storage for iostats.
    var iostats [2]float64

    // Create storage and warning log tracer for write delay.
    var (
        delaystats      [2]int64
        lastWritePaused time.Time
    )

    var (
        errc chan error
        merr error
    )

    // Iterate ad infinitum and collect the stats
    for i := 1; errc == nil && merr == nil; i++ {
        // Retrieve the database stats
        stats, err := db.db.GetProperty("leveldb.stats")
        if err != nil {
            db.log.Error("Failed to read database stats", "err", err)
            merr = err
            continue
        }
        // Find the compaction table, skip the header
        lines := strings.Split(stats, "\n")
        for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
            lines = lines[1:]
        }
        if len(lines) <= 3 {
            db.log.Error("Compaction table not found")
            merr = errors.New("compaction table not found")
            continue
        }
        lines = lines[3:]

        // Iterate over all the table rows, and accumulate the entries
        for j := 0; j < len(compactions[i%2]); j++ {
            compactions[i%2][j] = 0
        }
        for _, line := range lines {
            parts := strings.Split(line, "|")
            if len(parts) != 6 {
                break
            }
            for idx, counter := range parts[3:] {
                value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
                if err != nil {
                    db.log.Error("Compaction entry parsing failed", "err", err)
                    merr = err
                    continue
                }
                compactions[i%2][idx] += value
            }
        }
        // Update all the requested meters
        if db.compTimeMeter != nil {
            db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
        }
        if db.compReadMeter != nil {
            db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
        }
        if db.compWriteMeter != nil {
            db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
        }

        // Retrieve the write delay statistic
        writedelay, err := db.db.GetProperty("leveldb.writedelay")
        if err != nil {
            db.log.Error("Failed to read database write delay statistic", "err", err)
            merr = err
            continue
        }
        var (
            delayN        int64
            delayDuration string
            duration      time.Duration
            paused        bool
        )
        if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
            db.log.Error("Write delay statistic not found")
            merr = err
            continue
        }
        duration, err = time.ParseDuration(delayDuration)
        if err != nil {
            db.log.Error("Failed to parse delay duration", "err", err)
            merr = err
            continue
        }
        if db.writeDelayNMeter != nil {
            db.writeDelayNMeter.Mark(delayN - delaystats[0])
        }
        if db.writeDelayMeter != nil {
            db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
        }
        // If a warning that the database is performing compaction has been
        // displayed, any subsequent warnings will be withheld for one minute
        // not to overwhelm the user.
        if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
            time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
            db.log.Warn("Database compacting, degraded performance")
            lastWritePaused = time.Now()
        }
        delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()

        // Retrieve the database iostats.
        ioStats, err := db.db.GetProperty("leveldb.iostats")
        if err != nil {
            db.log.Error("Failed to read database iostats", "err", err)
            merr = err
            continue
        }
        var nRead, nWrite float64
        parts := strings.Split(ioStats, " ")
        if len(parts) < 2 {
            db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
            merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
            continue
        }
        if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
            db.log.Error("Bad syntax of read entry", "entry", parts[0])
            merr = err
            continue
        }
        if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
            db.log.Error("Bad syntax of write entry", "entry", parts[1])
            merr = err
            continue
        }
        if db.diskReadMeter != nil {
            db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
        }
        if db.diskWriteMeter != nil {
            db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
        }
        iostats[0], iostats[1] = nRead, nWrite

        // Sleep a bit, then repeat the stats collection
        select {
        case errc = <-db.quitChan:
            // Quit requested, stop hammering the database
        case <-time.After(refresh):
            // Timeout, gather a new set of stats
        }
    }

    if errc == nil {
        errc = <-db.quitChan
    }
    errc <- merr
}
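
// The following function is an illustrative sketch and is not part of the
// original file: it shows how a caller might wire up the metrics collection
// implemented by Meter/meter above. The metric prefix "eth/db/example/", the
// database path and the sleep are hypothetical placeholders; in go-ethereum
// proper the prefix is chosen by the caller that opens the database.
func exampleMetering() {
    db, err := NewLDBDatabase("/tmp/example-ldb-metrics", 16, 16) // hypothetical path
    if err != nil {
        return
    }
    // Meter registers the collectors and spawns the 3-second polling goroutine.
    db.Meter("eth/db/example/")

    // ... use the database; a few collection rounds happen in the background ...
    time.Sleep(10 * time.Second)

    // Close signals quitChan, waits for the collector to stop, then closes LevelDB.
    db.Close()
}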
func (db *LDBDatabase) NewBatch() Batch {
    return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}

type ldbBatch struct {
    db   *leveldb.DB
    b    *leveldb.Batch
    size int
}

func (b *ldbBatch) Put(key, value []byte) error {
    b.b.Put(key, value)
    b.size += len(value)
    return nil
}

func (b *ldbBatch) Delete(key []byte) error {
    b.b.Delete(key)
    b.size += 1
    return nil
}

func (b *ldbBatch) Write() error {
    return b.db.Write(b.b, nil)
}

func (b *ldbBatch) ValueSize() int {
    return b.size
}

func (b *ldbBatch) Reset() {
    b.b.Reset()
    b.size = 0
}
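
// The following function is an illustrative sketch and is not part of the
// original file: it shows the intended pattern for the batch implementation
// above, where writes accumulate in memory and ValueSize guides the caller on
// when to flush. The keys and values are hypothetical placeholders.
func exampleBatch(db *LDBDatabase) error {
    batch := db.NewBatch()
    if err := batch.Put([]byte("k1"), []byte("v1")); err != nil {
        return err
    }
    if err := batch.Put([]byte("k2"), []byte("v2")); err != nil {
        return err
    }
    // Flush the accumulated writes in a single LevelDB write, then reset the
    // batch so it can be reused.
    if batch.ValueSize() > 0 {
        if err := batch.Write(); err != nil {
            return err
        }
        batch.Reset()
    }
    return nil
}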