github.com/klaytn/klaytn@v1.12.1/storage/database/leveldb_database.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from ethdb/database.go (2018/06/04).
// Modified and improved for the klaytn development.

package database

import (
	"fmt"
	"strings"
	"sync"
	"time"

	klaytnmetrics "github.com/klaytn/klaytn/metrics"

	"github.com/klaytn/klaytn/common/fdlimit"
	"github.com/klaytn/klaytn/log"
	metricutils "github.com/klaytn/klaytn/metrics/utils"
	"github.com/rcrowley/go-metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var OpenFileLimit = 64

type LevelDBCompressionType uint8

const (
	AllNoCompression LevelDBCompressionType = iota
	ReceiptOnlySnappyCompression
	StateTrieOnlyNoCompression
	AllSnappyCompression
)

const (
	minWriteBufferSize             = 2 * opt.MiB
	minBlockCacheCapacity          = 2 * minWriteBufferSize
	MinOpenFilesCacheCapacity      = 16
	minBitsPerKeyForFilter         = 10
	minFileDescriptorsForDBManager = 2048
	minFileDescriptorsForLevelDB   = 16
)

var defaultLevelDBOption = &opt.Options{
	WriteBuffer:            minWriteBufferSize,
	BlockCacheCapacity:     minBlockCacheCapacity,
	OpenFilesCacheCapacity: MinOpenFilesCacheCapacity,
	Filter:                 filter.NewBloomFilter(minBitsPerKeyForFilter),
	DisableBufferPool:      false,
	DisableSeeksCompaction: true,
}

// GetDefaultLevelDBOption returns a copy of defaultLevelDBOption.
// defaultLevelDBOption has its fields set to their minimum values.
func GetDefaultLevelDBOption() *opt.Options {
	copiedOption := *defaultLevelDBOption
	return &copiedOption
}
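// The value copy above matters: callers may tune the returned options without
// mutating the package-level defaults. A minimal sketch (editor's
// illustration, not part of the original file):
//
//	opts := GetDefaultLevelDBOption()
//	opts.WriteBuffer = 8 * opt.MiB // affects only this copy
//	// defaultLevelDBOption.WriteBuffer is still minWriteBufferSize (2 MiB).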
// GetOpenFilesLimit raises the number of file handles allowed for this process
// and returns half of the allowance to assign to the database.
func GetOpenFilesLimit() int {
	limit, err := fdlimit.Current()
	if err != nil {
		logger.Crit("Failed to retrieve file descriptor allowance", "err", err)
	}
	if limit < minFileDescriptorsForDBManager {
		raised, err := fdlimit.Raise(minFileDescriptorsForDBManager)
		if err != nil || raised < minFileDescriptorsForDBManager {
			logger.Crit("Raised number of file descriptors is below the minimum value",
				"currFileDescriptorsLimit", limit, "minFileDescriptorsForDBManager", minFileDescriptorsForDBManager)
		}
		limit = int(raised)
	}
	return limit / 2 // Leave half for networking and other stuff
}
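// Worked example (editor's illustration): with a process limit of 4096 file
// descriptors, GetOpenFilesLimit returns 2048 and leaves the other half for
// networking. If the current limit is below minFileDescriptorsForDBManager
// (2048), it is first raised to that floor, so the function returns at least 1024.
//
//	dbHandles := GetOpenFilesLimit() // e.g. 4096 / 2 = 2048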
type levelDB struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	writeDelayCountMeter    metrics.Meter // Meter for measuring the cumulative number of write delays
	writeDelayDurationMeter metrics.Meter // Meter for measuring the cumulative duration of write delays

	aliveSnapshotsMeter metrics.Meter // Meter for measuring the number of alive snapshots
	aliveIteratorsMeter metrics.Meter // Meter for measuring the number of alive iterators

	compTimer              klaytnmetrics.HybridTimer // Timer for measuring the total time spent in database compaction
	compReadMeter          metrics.Meter             // Meter for measuring the data read during compaction
	compWriteMeter         metrics.Meter             // Meter for measuring the data written during compaction
	diskReadMeter          metrics.Meter             // Meter for measuring the effective amount of data read
	diskWriteMeter         metrics.Meter             // Meter for measuring the effective amount of data written
	blockCacheGauge        metrics.Gauge             // Gauge for measuring the current size of the block cache
	openedTablesCountMeter metrics.Meter             // Meter for measuring the number of opened tables
	memCompGauge           metrics.Gauge             // Gauge for tracking the number of memory compactions
	level0CompGauge        metrics.Gauge             // Gauge for tracking the number of table compactions in level 0
	nonlevel0CompGauge     metrics.Gauge             // Gauge for tracking the number of table compactions in levels above 0
	seekCompGauge          metrics.Gauge             // Gauge for tracking the number of table compactions caused by seeks (read optimization)

	levelSizesGauge     []metrics.Gauge
	levelTablesGauge    []metrics.Gauge
	levelReadGauge      []metrics.Gauge
	levelWriteGauge     []metrics.Gauge
	levelDurationsGauge []metrics.Gauge

	perfCheck       bool
	getTimer        klaytnmetrics.HybridTimer
	putTimer        klaytnmetrics.HybridTimer
	batchWriteTimer klaytnmetrics.HybridTimer

	quitLock sync.Mutex      // Mutex protecting the quit channel access
	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

	prefix string     // prefix used for metrics
	logger log.Logger // Contextual logger tracking the database path
}

func getLevelDBOptions(dbc *DBConfig) *opt.Options {
	newOption := &opt.Options{
		OpenFilesCacheCapacity:        dbc.OpenFilesLimit,
		BlockCacheCapacity:            dbc.LevelDBCacheSize / 2 * opt.MiB,
		WriteBuffer:                   dbc.LevelDBCacheSize / 2 * opt.MiB,
		Filter:                        filter.NewBloomFilter(10),
		DisableBufferPool:             !dbc.LevelDBBufferPool,
		CompactionTableSize:           2 * opt.MiB,
		CompactionTableSizeMultiplier: 1.0,
		DisableSeeksCompaction:        true,
	}

	return newOption
}

func NewLevelDB(dbc *DBConfig, entryType DBEntryType) (*levelDB, error) {
	localLogger := logger.NewWith("path", dbc.Dir)

	// Ensure we have some minimal caching and file guarantees
	if dbc.LevelDBCacheSize < 16 {
		dbc.LevelDBCacheSize = 16
	}
	if dbc.OpenFilesLimit < minFileDescriptorsForLevelDB {
		dbc.OpenFilesLimit = minFileDescriptorsForLevelDB
	}

	ldbOpts := getLevelDBOptions(dbc)
	ldbOpts.Compression = getCompressionType(dbc.LevelDBCompression, entryType)

	localLogger.Info("LevelDB configurations",
		"levelDBCacheSize", (ldbOpts.WriteBuffer+ldbOpts.BlockCacheCapacity)/opt.MiB, "openFilesLimit", ldbOpts.OpenFilesCacheCapacity,
		"useBufferPool", !ldbOpts.DisableBufferPool, "usePerfCheck", dbc.EnableDBPerfMetrics, "compressionType", ldbOpts.Compression,
		"compactionTableSize(MB)", ldbOpts.CompactionTableSize/opt.MiB, "compactionTableSizeMultiplier", ldbOpts.CompactionTableSizeMultiplier)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(dbc.Dir, ldbOpts)
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(dbc.Dir, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &levelDB{
		fn:        dbc.Dir,
		db:        db,
		logger:    localLogger,
		perfCheck: dbc.EnableDBPerfMetrics,
	}, nil
}

// setMinLevelDBOption raises option values to their minimums if they are set
// below the minimum values defined above.
func setMinLevelDBOption(ldbOption *opt.Options) {
	if ldbOption.WriteBuffer < minWriteBufferSize {
		ldbOption.WriteBuffer = minWriteBufferSize
	}

	if ldbOption.BlockCacheCapacity < minBlockCacheCapacity {
		ldbOption.BlockCacheCapacity = minBlockCacheCapacity
	}

	if ldbOption.OpenFilesCacheCapacity < MinOpenFilesCacheCapacity {
		ldbOption.OpenFilesCacheCapacity = MinOpenFilesCacheCapacity
	}
}

func getCompressionType(ct LevelDBCompressionType, dbEntryType DBEntryType) opt.Compression {
	switch ct {
	case AllSnappyCompression:
		return opt.SnappyCompression
	case AllNoCompression:
		return opt.NoCompression
	case ReceiptOnlySnappyCompression:
		if dbEntryType == ReceiptsDB {
			return opt.SnappyCompression
		}
		return opt.NoCompression
	case StateTrieOnlyNoCompression:
		if dbEntryType == StateTrieDB {
			return opt.NoCompression
		}
		return opt.SnappyCompression
	}
	return opt.NoCompression
}
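// Sketch of the mapping above (editor's illustration, not part of the
// original file):
//
//	getCompressionType(ReceiptOnlySnappyCompression, ReceiptsDB)  // opt.SnappyCompression
//	getCompressionType(ReceiptOnlySnappyCompression, StateTrieDB) // opt.NoCompression
//	getCompressionType(StateTrieOnlyNoCompression, StateTrieDB)   // opt.NoCompression
//	getCompressionType(StateTrieOnlyNoCompression, ReceiptsDB)    // opt.SnappyCompression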
// NewLevelDBWithOption explicitly receives a LevelDB option to construct a levelDB object.
func NewLevelDBWithOption(dbPath string, ldbOption *opt.Options) (*levelDB, error) {
	// TODO-Klaytn-Database Replace `NewLevelDB` with `NewLevelDBWithOption`

	localLogger := logger.NewWith("path", dbPath)

	setMinLevelDBOption(ldbOption)

	localLogger.Info("Allocated LevelDB",
		"WriteBuffer (MB)", ldbOption.WriteBuffer/opt.MiB, "OpenFilesCacheCapacity", ldbOption.OpenFilesCacheCapacity, "BlockCacheCapacity (MB)", ldbOption.BlockCacheCapacity/opt.MiB,
		"CompactionTableSize (MB)", ldbOption.CompactionTableSize/opt.MiB, "CompactionTableSizeMultiplier", ldbOption.CompactionTableSizeMultiplier, "DisableBufferPool", ldbOption.DisableBufferPool)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(dbPath, ldbOption)
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(dbPath, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &levelDB{
		fn:     dbPath,
		db:     db,
		logger: localLogger,
	}, nil
}

func (db *levelDB) Type() DBType {
	return LevelDB
}

// Path returns the path to the database directory.
func (db *levelDB) Path() string {
	return db.fn
}

// Put inserts the given key/value pair into the database.
func (db *levelDB) Put(key []byte, value []byte) error {
	// Generate the data to write to disk, update the meter and write
	// value = rle.Compress(value)
	if db.perfCheck {
		start := time.Now()
		err := db.put(key, value)
		db.putTimer.Update(time.Since(start))
		return err
	}
	return db.put(key, value)
}

func (db *levelDB) put(key []byte, value []byte) error {
	return db.db.Put(key, value, nil)
}

func (db *levelDB) Has(key []byte) (bool, error) {
	return db.db.Has(key, nil)
}

// Get returns the value of the given key if it's present.
func (db *levelDB) Get(key []byte) ([]byte, error) {
	if db.perfCheck {
		start := time.Now()
		val, err := db.get(key)
		db.getTimer.Update(time.Since(start))
		return val, err
	}
	return db.get(key)
	// return rle.Decompress(dat)
}

func (db *levelDB) get(key []byte) ([]byte, error) {
	dat, err := db.db.Get(key, nil)
	if err != nil {
		if err == leveldb.ErrNotFound {
			return nil, dataNotFoundErr
		}
		return nil, err
	}
	return dat, nil
}

// Delete removes the key from the database.
func (db *levelDB) Delete(key []byte) error {
	// Execute the actual operation
	return db.db.Delete(key, nil)
}
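// Typical round trip (editor's sketch; the DBConfig values are illustrative):
//
//	db, err := NewLevelDB(&DBConfig{Dir: "/tmp/ldb", LevelDBCacheSize: 16, OpenFilesLimit: 16}, StateTrieDB)
//	if err != nil {
//		return err
//	}
//	if err := db.Put([]byte("key"), []byte("value")); err != nil {
//		return err
//	}
//	val, err := db.Get([]byte("key")) // err == dataNotFoundErr when the key is absent
//	_ = val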
// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (db *levelDB) NewIterator(prefix []byte, start []byte) Iterator {
	return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
}

func (db *levelDB) Close() {
	// Stop the metrics collection to avoid internal database races
	db.quitLock.Lock()
	defer db.quitLock.Unlock()

	if db.quitChan != nil {
		errc := make(chan error)
		db.quitChan <- errc
		if err := <-errc; err != nil {
			db.logger.Error("Metrics collection failed", "err", err)
		}
		db.quitChan = nil
	}
	err := db.db.Close()
	if err == nil {
		db.logger.Info("Database closed")
	} else {
		db.logger.Error("Failed to close database", "err", err)
	}
}

func (db *levelDB) Stat(property string) (string, error) {
	if property == "" {
		property = "leveldb.stats"
	} else if !strings.HasPrefix(property, "leveldb.") {
		property = "leveldb." + property
	}
	return db.db.GetProperty(property)
}

func (db *levelDB) Compact(start []byte, limit []byte) error {
	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}

// Meter configures the database metrics collectors and, if the metrics system
// is enabled, starts the periodic stats collection goroutine.
func (db *levelDB) Meter(prefix string) {
	db.prefix = prefix

	// Initialize all the metrics collectors at the requested prefix
	db.writeDelayCountMeter = metrics.NewRegisteredMeter(prefix+"writedelay/count", nil)
	db.writeDelayDurationMeter = metrics.NewRegisteredMeter(prefix+"writedelay/duration", nil)
	db.aliveSnapshotsMeter = metrics.NewRegisteredMeter(prefix+"snapshots", nil)
	db.aliveIteratorsMeter = metrics.NewRegisteredMeter(prefix+"iterators", nil)
	db.compTimer = klaytnmetrics.NewRegisteredHybridTimer(prefix+"compaction/time", nil)
	db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compaction/read", nil)
	db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compaction/write", nil)
	db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
	db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
	db.blockCacheGauge = metrics.NewRegisteredGauge(prefix+"blockcache", nil)

	db.openedTablesCountMeter = metrics.NewRegisteredMeter(prefix+"opendedtables", nil)

	db.getTimer = klaytnmetrics.NewRegisteredHybridTimer(prefix+"get/time", nil)
	db.putTimer = klaytnmetrics.NewRegisteredHybridTimer(prefix+"put/time", nil)
	db.batchWriteTimer = klaytnmetrics.NewRegisteredHybridTimer(prefix+"batchwrite/time", nil)

	db.memCompGauge = metrics.NewRegisteredGauge(prefix+"compact/memory", nil)
	db.level0CompGauge = metrics.NewRegisteredGauge(prefix+"compact/level0", nil)
	db.nonlevel0CompGauge = metrics.NewRegisteredGauge(prefix+"compact/nonlevel0", nil)
	db.seekCompGauge = metrics.NewRegisteredGauge(prefix+"compact/seek", nil)

	// Short circuit metering if the metrics system is disabled
	// Above meters are initialized by NilMeter if metricutils.Enabled == false
	if !metricutils.Enabled {
		return
	}

	// Create a quit channel for the periodic collector and run it
	db.quitLock.Lock()
	db.quitChan = make(chan chan error)
	db.quitLock.Unlock()

	go db.meter(3 * time.Second)
}
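// Usage sketch (editor's illustration): Stat accepts goleveldb property names
// with or without the "leveldb." prefix, and Compact(nil, nil) compacts the
// entire key range.
//
//	stats, _ := db.Stat("stats") // same as db.Stat("leveldb.stats")
//	_ = stats
//	_ = db.Compact(nil, nil) // full-range compaction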
// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is how a stats table looks like (currently):
//
//	Compactions
//	 Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//	-------+------------+---------------+---------------+---------------+---------------
//	   0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//	   1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//	   2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//	   3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
//
// This is how the iostats look like (currently):
//
//	Read(MB):3895.04860 Write(MB):3654.64712
func (db *levelDB) meter(refresh time.Duration) {
	s := new(leveldb.DBStats)

	// Write delay related stats
	var prevWriteDelayCount int32
	var prevWriteDelayDuration time.Duration

	// Alive snapshots/iterators
	var prevAliveSnapshots, prevAliveIterators int32

	// Compaction related stats
	var prevCompRead, prevCompWrite int64
	var prevCompTime time.Duration

	// IO related stats
	var prevRead, prevWrite uint64

	var (
		errc chan error
		merr error
	)

	// Keep collecting stats unless an error occurs
hasError:
	for {
		merr = db.db.Stats(s)
		if merr != nil {
			break
		}
		// Write delay related stats
		db.writeDelayCountMeter.Mark(int64(s.WriteDelayCount - prevWriteDelayCount))
		db.writeDelayDurationMeter.Mark(int64(s.WriteDelayDuration - prevWriteDelayDuration))
		prevWriteDelayCount, prevWriteDelayDuration = s.WriteDelayCount, s.WriteDelayDuration

		// Alive snapshots/iterators
		db.aliveSnapshotsMeter.Mark(int64(s.AliveSnapshots - prevAliveSnapshots))
		db.aliveIteratorsMeter.Mark(int64(s.AliveIterators - prevAliveIterators))
		prevAliveSnapshots, prevAliveIterators = s.AliveSnapshots, s.AliveIterators

		// Compaction related stats
		var currCompRead, currCompWrite int64
		var currCompTime time.Duration
		for i := 0; i < len(s.LevelDurations); i++ {
			currCompTime += s.LevelDurations[i]
			currCompRead += s.LevelRead[i]
			currCompWrite += s.LevelWrite[i]

			db.updateLevelStats(s, i)
		}
		db.compTimer.Update(currCompTime - prevCompTime)
		db.compReadMeter.Mark(currCompRead - prevCompRead)
		db.compWriteMeter.Mark(currCompWrite - prevCompWrite)
		prevCompTime, prevCompRead, prevCompWrite = currCompTime, currCompRead, currCompWrite

		// IO related stats
		currRead, currWrite := s.IORead, s.IOWrite
		db.diskReadMeter.Mark(int64(currRead - prevRead))
		db.diskWriteMeter.Mark(int64(currWrite - prevWrite))
		prevRead, prevWrite = currRead, currWrite

		// BlockCache/OpenedTables related stats
		db.blockCacheGauge.Update(int64(s.BlockCacheSize))
		db.openedTablesCountMeter.Mark(int64(s.OpenedTablesCount))

		// Compaction related stats
		db.memCompGauge.Update(int64(s.MemComp))
		db.level0CompGauge.Update(int64(s.Level0Comp))
		db.nonlevel0CompGauge.Update(int64(s.NonLevel0Comp))
		db.seekCompGauge.Update(int64(s.SeekComp))

		// Sleep a bit, then repeat the stats collection
		select {
		case errc = <-db.quitChan:
			// Quit requested, stop hammering the database
			break hasError
		case <-time.After(refresh):
			// Timeout, gather a new set of stats
		}
	}

	if errc == nil {
		errc = <-db.quitChan
	}
	errc <- merr
}
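// Metric naming sketch (editor's illustration; the prefix is hypothetical):
// the collector above marks meters with per-interval deltas of goleveldb's
// cumulative counters and updates gauges with absolute values.
//
//	db.Meter("klay/db/statetrie/")
//	// emits e.g. klay/db/statetrie/compaction/time, klay/db/statetrie/disk/read,
//	// and per-level gauges such as klay/db/statetrie/level0/size.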
// updateLevelStats collects level-wise stats.
func (db *levelDB) updateLevelStats(s *leveldb.DBStats, lv int) {
	// Dynamically create new metrics for a newly seen level
	if len(db.levelSizesGauge) <= lv {
		prefix := db.prefix + fmt.Sprintf("level%v/", lv)
		db.levelSizesGauge = append(db.levelSizesGauge, metrics.NewRegisteredGauge(prefix+"size", nil))
		db.levelTablesGauge = append(db.levelTablesGauge, metrics.NewRegisteredGauge(prefix+"tables", nil))
		db.levelReadGauge = append(db.levelReadGauge, metrics.NewRegisteredGauge(prefix+"read", nil))
		db.levelWriteGauge = append(db.levelWriteGauge, metrics.NewRegisteredGauge(prefix+"write", nil))
		db.levelDurationsGauge = append(db.levelDurationsGauge, metrics.NewRegisteredGauge(prefix+"duration", nil))
	}

	db.levelSizesGauge[lv].Update(s.LevelSizes[lv])
	db.levelTablesGauge[lv].Update(int64(s.LevelTablesCounts[lv]))
	db.levelReadGauge[lv].Update(s.LevelRead[lv])
	db.levelWriteGauge[lv].Update(s.LevelWrite[lv])
	db.levelDurationsGauge[lv].Update(int64(s.LevelDurations[lv]))
}

func (db *levelDB) TryCatchUpWithPrimary() error {
	return nil
}

func (db *levelDB) NewBatch() Batch {
	return &ldbBatch{b: new(leveldb.Batch), ldb: db}
}

// ldbBatch is a write-only leveldb batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type ldbBatch struct {
	b    *leveldb.Batch
	ldb  *levelDB
	size int
}

// Put inserts the given value into the batch for later committing.
func (b *ldbBatch) Put(key, value []byte) error {
	b.b.Put(key, value)
	b.size += len(value)
	return nil
}

// Delete inserts a key removal into the batch for later committing.
func (b *ldbBatch) Delete(key []byte) error {
	b.b.Delete(key)
	b.size++
	return nil
}

// Write flushes any accumulated data to disk.
func (b *ldbBatch) Write() error {
	if b.ldb.perfCheck {
		start := time.Now()
		err := b.write()
		b.ldb.batchWriteTimer.Update(time.Since(start))
		return err
	}
	return b.write()
}

func (b *ldbBatch) write() error {
	return b.ldb.db.Write(b.b, nil)
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *ldbBatch) ValueSize() int {
	return b.size
}

// Reset resets the batch for reuse.
func (b *ldbBatch) Reset() {
	b.b.Reset()
	b.size = 0
}

func (b *ldbBatch) Release() {
	// nothing to do with ldbBatch
}

// bytesPrefixRange returns a key range that satisfies
//   - the given prefix, and
//   - the given seek position.
func bytesPrefixRange(prefix, start []byte) *util.Range {
	r := util.BytesPrefix(prefix)
	r.Start = append(r.Start, start...)
	return r
}
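// Prefix iteration sketch (editor's illustration): iterate all keys that share
// a prefix, starting at an offset within that prefix.
//
//	it := db.NewIterator([]byte("acc-"), []byte("0x01")) // keys >= "acc-0x01" with prefix "acc-"
//	for it.Next() {
//		_, _ = it.Key(), it.Value()
//	}
//	it.Release()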
// Replay replays the batch contents.
func (b *ldbBatch) Replay(w KeyValueWriter) error {
	return b.b.Replay(&replayer{writer: w})
}

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
	writer  KeyValueWriter
	failure error
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}
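// Batch usage sketch (editor's illustration): queue writes in memory, flush
// them in a single atomic leveldb write, then reuse the batch.
//
//	batch := db.NewBatch()
//	_ = batch.Put([]byte("a"), []byte("1"))
//	_ = batch.Delete([]byte("b"))
//	if err := batch.Write(); err != nil {
//		return err
//	}
//	batch.Reset()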