github.com/okex/exchain@v1.8.0/libs/tendermint/store/store.go

package store

import (
	"bytes"
	"fmt"
	"strconv"
	"strings"
	"sync"

	"github.com/tendermint/go-amino"

	"github.com/pkg/errors"

	db "github.com/okex/exchain/libs/tm-db"
	dbm "github.com/okex/exchain/libs/tm-db"

	"github.com/okex/exchain/libs/tendermint/types"
)

/*
BlockStore is a simple low level store for blocks.

There are three types of information stored:
 - BlockMeta:   Meta information about each block
 - Block part:  Parts of each block, aggregated w/ PartSet
 - Commit:      The commit part of each block, for gossiping precommit votes

Currently the precommit signatures are duplicated in the Block parts as
well as the Commit. In the future this may change, perhaps by moving
the Commit data outside the Block. (TODO)

The store can be assumed to contain all contiguous blocks between base and height (inclusive).

// NOTE: BlockStore methods will panic if they encounter errors
// deserializing loaded data, indicating probable corruption on disk.
*/
type BlockStore struct {
	db dbm.DB

	mtx    sync.RWMutex
	base   int64
	height int64
}

// NewBlockStore returns a new BlockStore with the given DB,
// initialized to the last height that was committed to the DB.
func NewBlockStore(db dbm.DB) *BlockStore {
	bsjson := LoadBlockStoreStateJSON(db)
	return &BlockStore{
		base:   bsjson.Base,
		height: bsjson.Height,
		db:     db,
	}
}

// Base returns the first known contiguous block height, or 0 for empty block stores.
func (bs *BlockStore) Base() int64 {
	bs.mtx.RLock()
	defer bs.mtx.RUnlock()
	return bs.base
}

// Height returns the last known contiguous block height, or 0 for empty block stores.
func (bs *BlockStore) Height() int64 {
	bs.mtx.RLock()
	defer bs.mtx.RUnlock()
	return bs.height
}

// Size returns the number of blocks in the block store.
func (bs *BlockStore) Size() int64 {
	bs.mtx.RLock()
	defer bs.mtx.RUnlock()
	if bs.height == 0 {
		return 0
	}
	return bs.height - bs.base + 1
}

var blockLoadBufPool = &sync.Pool{
	New: func() interface{} {
		return &[2]bytes.Buffer{}
	},
}

// LoadBlock returns the block with the given height.
// If no block is found for that height, it returns nil.
func (bs *BlockStore) LoadBlock(height int64) *types.Block {
	b, _ := bs.LoadBlockWithExInfo(height)
	return b
}
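// The read path above is typically driven as in the following sketch. This
// helper is illustrative only (it is not part of the original store API) and
// assumes the in-memory tm-db backend purely for demonstration; a real node
// would pass its persistent blockstore DB instead.
func exampleReadLatestBlock() *types.Block {
	memDB := dbm.NewMemDB()          // assumed in-memory tm-db backend, for demonstration
	bs := NewBlockStore(memDB)       // base/height restored from the persisted descriptor
	return bs.LoadBlock(bs.Height()) // nil when no block is stored at that height
}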
// LoadBlockWithExInfo returns the block at the given height,
// along with the BlockExInfo that is used to reconstruct the block parts.
func (bs *BlockStore) LoadBlockWithExInfo(height int64) (*types.Block, *types.BlockExInfo) {
	bufs := blockLoadBufPool.Get().(*[2]bytes.Buffer)
	defer blockLoadBufPool.Put(bufs)

	loadBuf, uncompressedBuf := &bufs[0], &bufs[1]

	loadBuf.Reset()
	uncompressedBuf.Reset()

	info := bs.loadBlockPartsBytesTo(height, loadBuf, uncompressedBuf)
	if loadBuf.Len() == 0 {
		return nil, nil
	}
	if !info.IsCompressed() {
		return bs.unmarshalBlockByBytes(loadBuf.Bytes()), &info
	} else {
		return bs.unmarshalBlockByBytes(uncompressedBuf.Bytes()), &info
	}
}

// unmarshalBlockByBytes decodes a block from the given block parts bytes.
func (bs *BlockStore) unmarshalBlockByBytes(blockBytes []byte) *types.Block {
	var block = new(types.Block)
	bz, err := amino.GetBinaryBareFromBinaryLengthPrefixed(blockBytes)
	if err == nil {
		err = block.UnmarshalFromAmino(cdc, bz)
	}
	if err != nil {
		block = new(types.Block)
		err = cdc.UnmarshalBinaryLengthPrefixed(blockBytes, block)
		if err != nil {
			// NOTE: The existence of meta should imply the existence of the
			// block. So, make sure meta is only saved after blocks are saved.
			panic(errors.Wrap(err, fmt.Sprintf("Error reading block, height:%d", block.Height)))
		}
	}
	return block
}

// LoadBlockByHash returns the block with the given hash.
// If no block is found for that hash, it returns nil.
// Panics if it fails to parse the height associated with the given hash.
func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block {
	bz, err := bs.db.Get(calcBlockHashKey(hash))
	if err != nil {
		panic(err)
	}
	if len(bz) == 0 {
		return nil
	}

	s := string(bz)
	height, err := strconv.ParseInt(s, 10, 64)

	if err != nil {
		panic(errors.Wrapf(err, "failed to extract height from %s", s))
	}
	return bs.LoadBlock(height)
}

func loadBlockPartFromBytes(bz []byte) *types.Part {
	if len(bz) == 0 {
		return nil
	}
	var part = new(types.Part)
	err := part.UnmarshalFromAmino(cdc, bz)
	if err != nil {
		part = new(types.Part)
		err = cdc.UnmarshalBinaryBare(bz, part)
		if err != nil {
			panic(errors.Wrap(err, "Error reading block part"))
		}
	}
	return part
}
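// NOTE: unmarshalBlockByBytes, loadBlockPartFromBytes and decodeBlockMeta
// (below) share the same two-step decoding strategy: first try the
// hand-written UnmarshalFromAmino fast path, and only if that fails fall
// back to the reflection-based codec (cdc), so data that the fast path
// cannot decode is still readable.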
// LoadBlockPart returns the Part at the given index
// from the block at the given height.
// If no part is found for the given height and index, it returns nil.
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
	v, err := bs.db.GetUnsafeValue(calcBlockPartKey(height, index), func(bz []byte) (interface{}, error) {
		return loadBlockPartFromBytes(bz), nil
	})
	if err != nil {
		panic(err)
	}
	return v.(*types.Part)
}

func loadBlockPartBytesFromBytesTo(bz []byte, buf *bytes.Buffer) {
	if len(bz) == 0 {
		return
	}
	lenBefore := buf.Len()
	err := unmarshalBlockPartBytesTo(bz, buf)
	if err == nil {
		return
	}
	part := loadBlockPartFromBytes(bz)
	buf.Truncate(lenBefore)
	buf.Write(part.Bytes)
}

func (bs *BlockStore) loadBlockPartBytesTo(height int64, index int, buf *bytes.Buffer) {
	_, err := bs.db.GetUnsafeValue(calcBlockPartKey(height, index), func(bz []byte) (interface{}, error) {
		loadBlockPartBytesFromBytesTo(bz, buf)
		return nil, nil
	})
	if err != nil {
		panic(err)
	}
}

// loadBlockPartsBytesTo loads all block part bytes into the given buffers:
// buf receives the block part bytes exactly as stored, and uncompressed
// receives the uncompressed bytes if the block was stored compressed.
func (bs *BlockStore) loadBlockPartsBytesTo(height int64, buf *bytes.Buffer, uncompressed *bytes.Buffer) types.BlockExInfo {
	var blockMeta = bs.LoadBlockMeta(height)
	if blockMeta == nil {
		return types.BlockExInfo{}
	}
	blockPartSize, bufBeforeLen := 0, buf.Len()
	for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
		bs.loadBlockPartBytesTo(height, i, buf)
		if i == 0 {
			blockPartSize = buf.Len() - bufBeforeLen
		}
	}

	// uncompress if the block part bytes were produced by a compressed block
	compressSign, err := types.UncompressBlockFromBytesTo(buf.Bytes(), uncompressed)
	if err != nil {
		panic(errors.Wrap(err, "failed to uncompress block"))
	}

	return types.BlockExInfo{
		BlockCompressType: compressSign / types.CompressDividing,
		BlockCompressFlag: compressSign % types.CompressDividing,
		BlockPartSize:     blockPartSize,
	}
}
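// The compress sign returned by types.UncompressBlockFromBytesTo packs two
// values into one integer: the compression type in the high part and the
// compression flag in the low part, split by types.CompressDividing. For
// example, if CompressDividing were 10, a sign of 21 would decode to
// BlockCompressType 2 and BlockCompressFlag 1 (the concrete constant value
// is defined in the types package, not here).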
func decodeBlockMeta(bz []byte) (*types.BlockMeta, error) {
	if len(bz) == 0 {
		return nil, nil
	}
	var blockMeta = new(types.BlockMeta)
	err := blockMeta.UnmarshalFromAmino(cdc, bz)
	if err != nil {
		err = cdc.UnmarshalBinaryBare(bz, blockMeta)
		if err != nil {
			return nil, errors.Wrap(err, "Error reading block meta")
		}
	}
	return blockMeta, nil
}

// LoadBlockMeta returns the BlockMeta for the given height.
// If no block is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
	v, err := bs.db.GetUnsafeValue(calcBlockMetaKey(height), func(bz []byte) (interface{}, error) {
		return decodeBlockMeta(bz)
	})
	if err != nil {
		panic(err)
	}
	return v.(*types.BlockMeta)
}

// LoadBlockCommit returns the Commit for the given height.
// This commit consists of the +2/3 and other Precommit-votes for block at `height`,
// and it comes from the block.LastCommit for `height+1`.
// If no commit is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
	var commit = new(types.Commit)
	bz, err := bs.db.Get(calcBlockCommitKey(height))
	if err != nil {
		panic(err)
	}
	if len(bz) == 0 {
		return nil
	}
	err = cdc.UnmarshalBinaryBare(bz, commit)
	if err != nil {
		panic(errors.Wrap(err, "Error reading block commit"))
	}
	return commit
}

// LoadSeenCommit returns the locally seen Commit for the given height.
// This is useful when we've seen a commit, but there has not yet been
// a new block at `height + 1` that includes this commit in its block.LastCommit.
func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
	var commit = new(types.Commit)
	bz, err := bs.db.Get(calcSeenCommitKey(height))
	if err != nil {
		panic(err)
	}
	if len(bz) == 0 {
		return nil
	}
	err = cdc.UnmarshalBinaryBare(bz, commit)
	if err != nil {
		panic(errors.Wrap(err, "Error reading block seen commit"))
	}
	return commit
}

// PruneBlocks removes blocks up to (but not including) the given height.
// It returns the number of blocks pruned.
func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) {
	return bs.deleteBatch(height, false)
}

// DeleteBlocksFromTop removes blocks down to (but not including) the given height.
// It returns the number of blocks deleted.
func (bs *BlockStore) DeleteBlocksFromTop(height int64) (uint64, error) {
	return bs.deleteBatch(height, true)
}

func (bs *BlockStore) deleteBatch(height int64, deleteFromTop bool) (uint64, error) {
	if height <= 0 {
		return 0, fmt.Errorf("height must be greater than 0")
	}

	bs.mtx.RLock()
	top := bs.height
	base := bs.base
	bs.mtx.RUnlock()
	if height > top {
		return 0, fmt.Errorf("cannot delete beyond the latest height %v, delete from top %t", top, deleteFromTop)
	}
	if height < base {
		return 0, fmt.Errorf("cannot delete to height %v, it is lower than base height %v, delete from top %t",
			height, base, deleteFromTop)
	}

	deleted := uint64(0)
	batch := bs.db.NewBatch()
	defer batch.Close()
	flush := func(batch db.Batch, height int64) error {
		// We can't trust batches to be atomic, so update the in-memory state
		// first to make sure no one tries to access missing blocks.
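		// When pruning from the bottom the base moves up; when deleting from
		// the top the height moves down. Either way the visible window
		// [base, height] is shrunk and persisted before the batch is written.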
		bs.mtx.Lock()
		if deleteFromTop {
			bs.height = height
		} else {
			bs.base = height
		}
		bs.mtx.Unlock()
		bs.saveState()

		err := batch.WriteSync()
		if err != nil {
			batch.Close()
			return fmt.Errorf("failed to delete to height %v, delete from top %t: %w", height, deleteFromTop, err)
		}
		batch.Close()
		return nil
	}

	deleteFn := func(h int64) error {
		meta := bs.LoadBlockMeta(h)
		if meta == nil { // assume already deleted
			return nil
		}
		batch.Delete(calcBlockMetaKey(h))
		batch.Delete(calcBlockHashKey(meta.BlockID.Hash))
		batch.Delete(calcBlockCommitKey(h))
		batch.Delete(calcSeenCommitKey(h))
		for p := 0; p < meta.BlockID.PartsHeader.Total; p++ {
			batch.Delete(calcBlockPartKey(h, p))
		}
		deleted++

		// flush every 1000 blocks to avoid batches becoming too large
		if deleted%1000 == 0 && deleted > 0 {
			err := flush(batch, h)
			if err != nil {
				return err
			}
			batch = bs.db.NewBatch()
		}
		return nil
	}

	if deleteFromTop {
		for h := top; h > height; h-- {
			err := deleteFn(h)
			if err != nil {
				return 0, err
			}
		}
	} else {
		for h := base; h < height; h++ {
			err := deleteFn(h)
			if err != nil {
				return 0, err
			}
		}
	}

	err := flush(batch, height)
	if err != nil {
		return 0, err
	}
	return deleted, nil
}

// SaveBlock persists the given block, blockParts, and seenCommit to the underlying db.
// blockParts: Must be parts of the block
// seenCommit: The +2/3 precommits that were seen which committed the block at this height.
//
// If all the nodes restart after committing a block,
// we need this to reload the precommits to catch up nodes to the
// most recent height. Otherwise they'd stall at H-1.
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
	batch := bs.db.NewBatch()
	defer batch.Close()

	if block == nil {
		panic("BlockStore can only save a non-nil block")
	}

	height := block.Height
	hash := block.Hash()

	if g, w := height, bs.Height()+1; bs.Base() > 0 && g != w {
		panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
	}
	if !blockParts.IsComplete() {
		panic("BlockStore can only save complete block part sets")
	}

	// Save block meta
	blockMeta := types.NewBlockMeta(block, blockParts)
	metaBytes := cdc.MustMarshalBinaryBare(blockMeta)
	batch.Set(calcBlockMetaKey(height), metaBytes)
	batch.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height)))

	// Save block parts
	for i := 0; i < blockParts.Total(); i++ {
		part := blockParts.GetPart(i)
		bs.saveBlockPart(batch, height, i, part)
	}

	// Save block commit (duplicate and separate from the Block)
	blockCommitBytes := cdc.MustMarshalBinaryBare(block.LastCommit)
	batch.Set(calcBlockCommitKey(height-1), blockCommitBytes)

	// Save seen commit (seen +2/3 precommits for block)
	// NOTE: we can delete this at a later height
	seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit)
	batch.Set(calcSeenCommitKey(height), seenCommitBytes)

	// Done!
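	// Advance the in-memory height (and initialize base on the first saved
	// block) before staging the BlockStoreStateJSON descriptor into the same
	// batch, so the descriptor and the block data hit disk together.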
	bs.mtx.Lock()
	bs.height = height
	if bs.base == 0 {
		bs.base = height
	}
	bs.mtx.Unlock()

	// Save new BlockStoreStateJSON descriptor
	bs.saveStateBatch(batch)

	// Flush
	batch.WriteSync()
}

func (bs *BlockStore) saveBlockPart(batch db.Batch, height int64, index int, part *types.Part) {
	partBytes := cdc.MustMarshalBinaryBare(part)
	batch.Set(calcBlockPartKey(height, index), partBytes)
}

func (bs *BlockStore) saveState() {
	bs.mtx.RLock()
	bsJSON := BlockStoreStateJSON{
		Base:   bs.base,
		Height: bs.height,
	}
	bs.mtx.RUnlock()
	bsJSON.Save(bs.db)
}

func (bs *BlockStore) saveStateBatch(batch db.Batch) {
	bs.mtx.RLock()
	bsJSON := BlockStoreStateJSON{
		Base:   bs.base,
		Height: bs.height,
	}
	bs.mtx.RUnlock()
	bsJSON.saveBatch(batch)
}

//-----------------------------------------------------------------------------

func calcBlockMetaKey(height int64) []byte {
	return amino.StrToBytes(strings.Join([]string{"H", strconv.FormatInt(height, 10)}, ":"))
}

func calcBlockPartKey(height int64, partIndex int) []byte {
	return amino.StrToBytes(strings.Join([]string{"P", strconv.FormatInt(height, 10), strconv.Itoa(partIndex)}, ":"))
}

func calcBlockCommitKey(height int64) []byte {
	return amino.StrToBytes(strings.Join([]string{"C", strconv.FormatInt(height, 10)}, ":"))
}

func calcSeenCommitKey(height int64) []byte {
	return amino.StrToBytes(strings.Join([]string{"SC", strconv.FormatInt(height, 10)}, ":"))
}

func calcBlockHashKey(hash []byte) []byte {
	return amino.StrToBytes(strings.Join([]string{"BH", amino.HexEncodeToString(hash)}, ":"))
}

//-----------------------------------------------------------------------------

var blockStoreKey = []byte("blockStore")

// BlockStoreStateJSON is the block store state JSON structure.
type BlockStoreStateJSON struct {
	Base   int64 `json:"base"`
	Height int64 `json:"height"`
}

// Save persists the blockStore state to the database as JSON.
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
	bytes, err := cdc.MarshalJSON(bsj)
	if err != nil {
		panic(fmt.Sprintf("Could not marshal state bytes: %v", err))
	}
	db.SetSync(blockStoreKey, bytes)
}

// saveBatch stages the blockStore state into the given batch as JSON.
func (bsj BlockStoreStateJSON) saveBatch(batch dbm.Batch) {
	bytes, err := cdc.MarshalJSON(bsj)
	if err != nil {
		panic(fmt.Sprintf("Could not marshal state bytes: %v", err))
	}
	batch.Set(blockStoreKey, bytes)
}

// LoadBlockStoreStateJSON returns the BlockStoreStateJSON as loaded from disk.
// If no BlockStoreStateJSON was previously persisted, it returns a descriptor
// with Base 0 and Height set to the configured start block height.
func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
	bytes, err := db.Get(blockStoreKey)
	if err != nil {
		panic(err)
	}
	if len(bytes) == 0 {
		return BlockStoreStateJSON{
			Base:   0,
			Height: types.GetStartBlockHeight(),
		}
	}
	bsj := BlockStoreStateJSON{}
	err = cdc.UnmarshalJSON(bytes, &bsj)
	if err != nil {
		panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes))
	}
	// Backwards compatibility with persisted data from before Base existed.
	if bsj.Height > 0 && bsj.Base == 0 {
		bsj.Base = 1
	}
	return bsj
}
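// Key layout used by the calc*Key helpers above (heights and part indexes in
// decimal, block hashes hex-encoded):
//
//	H:<height>          -> BlockMeta
//	P:<height>:<index>  -> block Part <index>
//	C:<height>          -> Commit for <height> (written when block <height>+1 is saved)
//	SC:<height>         -> locally seen Commit for <height>
//	BH:<hash>           -> height of the block with that hash, as a decimal string
//	blockStore          -> BlockStoreStateJSON descriptor
//
// The following helper is an illustrative sketch only (not part of the
// original API); it assumes the in-memory tm-db backend and shows how the
// state descriptor round-trips through Save and LoadBlockStoreStateJSON.
func exampleStateRoundTrip() BlockStoreStateJSON {
	memDB := dbm.NewMemDB()                               // assumed in-memory backend, for demonstration
	BlockStoreStateJSON{Base: 1, Height: 100}.Save(memDB) // persists JSON under the "blockStore" key
	return LoadBlockStoreStateJSON(memDB)                 // reads it back: {Base: 1, Height: 100}
}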