github.com/darrenli6/fabric-sdk-example@v0.0.0-20220109053535-94b13b56df8c/common/ledger/blkstorage/fsblkstorage/blockfile_mgr.go (about) 1 /* 2 Copyright IBM Corp. 2016 All Rights Reserved. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package fsblkstorage 18 19 import ( 20 "fmt" 21 "math" 22 "sync" 23 "sync/atomic" 24 25 "github.com/davecgh/go-spew/spew" 26 27 "github.com/golang/protobuf/proto" 28 "github.com/hyperledger/fabric/common/flogging" 29 "github.com/hyperledger/fabric/common/ledger/blkstorage" 30 "github.com/hyperledger/fabric/common/ledger/util" 31 "github.com/hyperledger/fabric/common/ledger/util/leveldbhelper" 32 "github.com/hyperledger/fabric/protos/common" 33 "github.com/hyperledger/fabric/protos/peer" 34 putil "github.com/hyperledger/fabric/protos/utils" 35 ) 36 37 var logger = flogging.MustGetLogger("fsblkstorage") 38 39 const ( 40 blockfilePrefix = "blockfile_" 41 ) 42 43 var ( 44 blkMgrInfoKey = []byte("blkMgrInfo") 45 ) 46 47 type blockfileMgr struct { 48 rootDir string 49 conf *Conf 50 db *leveldbhelper.DBHandle 51 index index 52 cpInfo *checkpointInfo 53 cpInfoCond *sync.Cond 54 currentFileWriter *blockfileWriter 55 bcInfo atomic.Value 56 } 57 58 /* 59 Creates a new manager that will manage the files used for block persistence. 60 This manager manages the file system FS including 61 -- the directory where the files are stored 62 -- the individual files where the blocks are stored 63 -- the checkpoint which tracks the latest file being persisted to 64 -- the index which tracks what block and transaction is in what file 65 When a new blockfile manager is started (i.e. only on start-up), it checks 66 if this start-up is the first time the system is coming up or is this a restart 67 of the system. 68 69 The blockfile manager stores blocks of data into a file system. That file 70 storage is done by creating sequentially numbered files of a configured size 71 i.e blockfile_000000, blockfile_000001, etc.. 72 73 Each transcation in a block is stored with information about the number of 74 bytes in that transaction 75 Adding txLoc [fileSuffixNum=0, offset=3, bytesLength=104] for tx [1:0] to index 76 Adding txLoc [fileSuffixNum=0, offset=107, bytesLength=104] for tx [1:1] to index 77 Each block is stored with the total encoded length of that block as well as the 78 tx location offsets. 79 80 Remember that these steps are only done once at start-up of the system. 81 At start up a new manager: 82 *) Checks if the directory for storing files exists, if not creates the dir 83 *) Checks if the key value database exists, if not creates one 84 (will create a db dir) 85 *) Determines the checkpoint information (cpinfo) used for storage 86 -- Loads from db if exist, if not instantiate a new cpinfo 87 -- If cpinfo was loaded from db, compares to FS 88 -- If cpinfo and file system are not in sync, syncs cpInfo from FS 89 *) Starts a new file writer 90 -- truncates file per cpinfo to remove any excess past last block 91 *) Determines the index information used to find tx and blocks in 92 the file blkstorage 93 -- Instantiates a new blockIdxInfo 94 -- Loads the index from the db if exists 95 -- syncIndex comparing the last block indexed to what is in the FS 96 -- If index and file system are not in sync, syncs index from the FS 97 *) Updates blockchain info used by the APIs 98 */ 99 func newBlockfileMgr(id string, conf *Conf, indexConfig *blkstorage.IndexConfig, indexStore *leveldbhelper.DBHandle) *blockfileMgr { 100 logger.Debugf("newBlockfileMgr() initializing file-based block storage for ledger: %s ", id) 101 //Determine the root directory for the blockfile storage, if it does not exist create it 102 rootDir := conf.getLedgerBlockDir(id) 103 _, err := util.CreateDirIfMissing(rootDir) 104 if err != nil { 105 panic(fmt.Sprintf("Error: %s", err)) 106 } 107 // Instantiate the manager, i.e. blockFileMgr structure 108 mgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: indexStore} 109 110 // cp = checkpointInfo, retrieve from the database the file suffix or number of where blocks were stored. 111 // It also retrieves the current size of that file and the last block number that was written to that file. 112 // At init checkpointInfo:latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0] 113 cpInfo, err := mgr.loadCurrentInfo() 114 if err != nil { 115 panic(fmt.Sprintf("Could not get block file info for current block file from db: %s", err)) 116 } 117 if cpInfo == nil { 118 logger.Info(`Getting block information from block storage`) 119 if cpInfo, err = constructCheckpointInfoFromBlockFiles(rootDir); err != nil { 120 panic(fmt.Sprintf("Could not build checkpoint info from block files: %s", err)) 121 } 122 logger.Debugf("Info constructed by scanning the blocks dir = %s", spew.Sdump(cpInfo)) 123 } else { 124 logger.Debug(`Synching block information from block storage (if needed)`) 125 syncCPInfoFromFS(rootDir, cpInfo) 126 } 127 err = mgr.saveCurrentInfo(cpInfo, true) 128 if err != nil { 129 panic(fmt.Sprintf("Could not save next block file info to db: %s", err)) 130 } 131 132 //Open a writer to the file identified by the number and truncate it to only contain the latest block 133 // that was completely saved (file system, index, cpinfo, etc) 134 currentFileWriter, err := newBlockfileWriter(deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum)) 135 if err != nil { 136 panic(fmt.Sprintf("Could not open writer to current file: %s", err)) 137 } 138 //Truncate the file to remove excess past last block 139 err = currentFileWriter.truncateFile(cpInfo.latestFileChunksize) 140 if err != nil { 141 panic(fmt.Sprintf("Could not truncate current file to known size in db: %s", err)) 142 } 143 144 // Create a new KeyValue store database handler for the blocks index in the keyvalue database 145 mgr.index = newBlockIndex(indexConfig, indexStore) 146 147 // Update the manager with the checkpoint info and the file writer 148 mgr.cpInfo = cpInfo 149 mgr.currentFileWriter = currentFileWriter 150 // Create a checkpoint condition (event) variable, for the goroutine waiting for 151 // or announcing the occurrence of an event. 152 mgr.cpInfoCond = sync.NewCond(&sync.Mutex{}) 153 154 // init BlockchainInfo for external API's 155 bcInfo := &common.BlockchainInfo{ 156 Height: 0, 157 CurrentBlockHash: nil, 158 PreviousBlockHash: nil} 159 160 if !cpInfo.isChainEmpty { 161 //If start up is a restart of an existing storage, sync the index from block storage and update BlockchainInfo for external API's 162 mgr.syncIndex() 163 lastBlockHeader, err := mgr.retrieveBlockHeaderByNumber(cpInfo.lastBlockNumber) 164 if err != nil { 165 panic(fmt.Sprintf("Could not retrieve header of the last block form file: %s", err)) 166 } 167 lastBlockHash := lastBlockHeader.Hash() 168 previousBlockHash := lastBlockHeader.PreviousHash 169 bcInfo = &common.BlockchainInfo{ 170 Height: cpInfo.lastBlockNumber + 1, 171 CurrentBlockHash: lastBlockHash, 172 PreviousBlockHash: previousBlockHash} 173 } 174 mgr.bcInfo.Store(bcInfo) 175 return mgr 176 } 177 178 //cp = checkpointInfo, from the database gets the file suffix and the size of 179 // the file of where the last block was written. Also retrieves contains the 180 // last block number that was written. At init 181 //checkpointInfo:latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0] 182 func syncCPInfoFromFS(rootDir string, cpInfo *checkpointInfo) { 183 logger.Debugf("Starting checkpoint=%s", cpInfo) 184 //Checks if the file suffix of where the last block was written exists 185 filePath := deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum) 186 exists, size, err := util.FileExists(filePath) 187 if err != nil { 188 panic(fmt.Sprintf("Error in checking whether file [%s] exists: %s", filePath, err)) 189 } 190 logger.Debugf("status of file [%s]: exists=[%t], size=[%d]", filePath, exists, size) 191 //Test is !exists because when file number is first used the file does not exist yet 192 //checks that the file exists and that the size of the file is what is stored in cpinfo 193 //status of file [/tmp/tests/ledger/blkstorage/fsblkstorage/blocks/blockfile_000000]: exists=[false], size=[0] 194 if !exists || int(size) == cpInfo.latestFileChunksize { 195 // check point info is in sync with the file on disk 196 return 197 } 198 //Scan the file system to verify that the checkpoint info stored in db is correct 199 _, endOffsetLastBlock, numBlocks, err := scanForLastCompleteBlock( 200 rootDir, cpInfo.latestFileChunkSuffixNum, int64(cpInfo.latestFileChunksize)) 201 if err != nil { 202 panic(fmt.Sprintf("Could not open current file for detecting last block in the file: %s", err)) 203 } 204 cpInfo.latestFileChunksize = int(endOffsetLastBlock) 205 if numBlocks == 0 { 206 return 207 } 208 //Updates the checkpoint info for the actual last block number stored and it's end location 209 if cpInfo.isChainEmpty { 210 cpInfo.lastBlockNumber = uint64(numBlocks - 1) 211 } else { 212 cpInfo.lastBlockNumber += uint64(numBlocks) 213 } 214 cpInfo.isChainEmpty = false 215 logger.Debugf("Checkpoint after updates by scanning the last file segment:%s", cpInfo) 216 } 217 218 func deriveBlockfilePath(rootDir string, suffixNum int) string { 219 return rootDir + "/" + blockfilePrefix + fmt.Sprintf("%06d", suffixNum) 220 } 221 222 func (mgr *blockfileMgr) close() { 223 mgr.currentFileWriter.close() 224 } 225 226 func (mgr *blockfileMgr) moveToNextFile() { 227 cpInfo := &checkpointInfo{ 228 latestFileChunkSuffixNum: mgr.cpInfo.latestFileChunkSuffixNum + 1, 229 latestFileChunksize: 0, 230 lastBlockNumber: mgr.cpInfo.lastBlockNumber} 231 232 nextFileWriter, err := newBlockfileWriter( 233 deriveBlockfilePath(mgr.rootDir, cpInfo.latestFileChunkSuffixNum)) 234 235 if err != nil { 236 panic(fmt.Sprintf("Could not open writer to next file: %s", err)) 237 } 238 mgr.currentFileWriter.close() 239 err = mgr.saveCurrentInfo(cpInfo, true) 240 if err != nil { 241 panic(fmt.Sprintf("Could not save next block file info to db: %s", err)) 242 } 243 mgr.currentFileWriter = nextFileWriter 244 mgr.updateCheckpoint(cpInfo) 245 } 246 247 func (mgr *blockfileMgr) addBlock(block *common.Block) error { 248 if block.Header.Number != mgr.getBlockchainInfo().Height { 249 return fmt.Errorf("Block number should have been %d but was %d", mgr.getBlockchainInfo().Height, block.Header.Number) 250 } 251 blockBytes, info, err := serializeBlock(block) 252 if err != nil { 253 return fmt.Errorf("Error while serializing block: %s", err) 254 } 255 blockHash := block.Header.Hash() 256 //Get the location / offset where each transaction starts in the block and where the block ends 257 txOffsets := info.txOffsets 258 currentOffset := mgr.cpInfo.latestFileChunksize 259 if err != nil { 260 return fmt.Errorf("Error while serializing block: %s", err) 261 } 262 blockBytesLen := len(blockBytes) 263 blockBytesEncodedLen := proto.EncodeVarint(uint64(blockBytesLen)) 264 totalBytesToAppend := blockBytesLen + len(blockBytesEncodedLen) 265 266 //Determine if we need to start a new file since the size of this block 267 //exceeds the amount of space left in the current file 268 if currentOffset+totalBytesToAppend > mgr.conf.maxBlockfileSize { 269 mgr.moveToNextFile() 270 currentOffset = 0 271 } 272 //append blockBytesEncodedLen to the file 273 err = mgr.currentFileWriter.append(blockBytesEncodedLen, false) 274 if err == nil { 275 //append the actual block bytes to the file 276 err = mgr.currentFileWriter.append(blockBytes, true) 277 } 278 if err != nil { 279 truncateErr := mgr.currentFileWriter.truncateFile(mgr.cpInfo.latestFileChunksize) 280 if truncateErr != nil { 281 panic(fmt.Sprintf("Could not truncate current file to known size after an error during block append: %s", err)) 282 } 283 return fmt.Errorf("Error while appending block to file: %s", err) 284 } 285 286 //Update the checkpoint info with the results of adding the new block 287 currentCPInfo := mgr.cpInfo 288 newCPInfo := &checkpointInfo{ 289 latestFileChunkSuffixNum: currentCPInfo.latestFileChunkSuffixNum, 290 latestFileChunksize: currentCPInfo.latestFileChunksize + totalBytesToAppend, 291 isChainEmpty: false, 292 lastBlockNumber: block.Header.Number} 293 //save the checkpoint information in the database 294 if err = mgr.saveCurrentInfo(newCPInfo, false); err != nil { 295 truncateErr := mgr.currentFileWriter.truncateFile(currentCPInfo.latestFileChunksize) 296 if truncateErr != nil { 297 panic(fmt.Sprintf("Error in truncating current file to known size after an error in saving checkpoint info: %s", err)) 298 } 299 return fmt.Errorf("Error while saving current file info to db: %s", err) 300 } 301 302 //Index block file location pointer updated with file suffex and offset for the new block 303 blockFLP := &fileLocPointer{fileSuffixNum: newCPInfo.latestFileChunkSuffixNum} 304 blockFLP.offset = currentOffset 305 // shift the txoffset because we prepend length of bytes before block bytes 306 for _, txOffset := range txOffsets { 307 txOffset.loc.offset += len(blockBytesEncodedLen) 308 } 309 //save the index in the database 310 mgr.index.indexBlock(&blockIdxInfo{ 311 blockNum: block.Header.Number, blockHash: blockHash, 312 flp: blockFLP, txOffsets: txOffsets, metadata: block.Metadata}) 313 314 //update the checkpoint info (for storage) and the blockchain info (for APIs) in the manager 315 mgr.updateCheckpoint(newCPInfo) 316 mgr.updateBlockchainInfo(blockHash, block) 317 return nil 318 } 319 320 func (mgr *blockfileMgr) syncIndex() error { 321 var lastBlockIndexed uint64 322 var indexEmpty bool 323 var err error 324 //from the database, get the last block that was indexed 325 if lastBlockIndexed, err = mgr.index.getLastBlockIndexed(); err != nil { 326 if err != errIndexEmpty { 327 return err 328 } 329 indexEmpty = true 330 } 331 332 //initialize index to file number:zero, offset:zero and blockNum:0 333 startFileNum := 0 334 startOffset := 0 335 skipFirstBlock := false 336 //get the last file that blocks were added to using the checkpoint info 337 endFileNum := mgr.cpInfo.latestFileChunkSuffixNum 338 startingBlockNum := uint64(0) 339 340 //if the index stored in the db has value, update the index information with those values 341 if !indexEmpty { 342 if lastBlockIndexed == mgr.cpInfo.lastBlockNumber { 343 logger.Debug("Both the block files and indices are in sync.") 344 return nil 345 } 346 logger.Debugf("Last block indexed [%d], Last block present in block files [%d]", lastBlockIndexed, mgr.cpInfo.lastBlockNumber) 347 var flp *fileLocPointer 348 if flp, err = mgr.index.getBlockLocByBlockNum(lastBlockIndexed); err != nil { 349 return err 350 } 351 startFileNum = flp.fileSuffixNum 352 startOffset = flp.locPointer.offset 353 skipFirstBlock = true 354 startingBlockNum = lastBlockIndexed + 1 355 } else { 356 logger.Debugf("No block indexed, Last block present in block files=[%d]", mgr.cpInfo.lastBlockNumber) 357 } 358 359 logger.Infof("Start building index from block [%d] to last block [%d]", startingBlockNum, mgr.cpInfo.lastBlockNumber) 360 361 //open a blockstream to the file location that was stored in the index 362 var stream *blockStream 363 if stream, err = newBlockStream(mgr.rootDir, startFileNum, int64(startOffset), endFileNum); err != nil { 364 return err 365 } 366 var blockBytes []byte 367 var blockPlacementInfo *blockPlacementInfo 368 369 if skipFirstBlock { 370 if blockBytes, _, err = stream.nextBlockBytesAndPlacementInfo(); err != nil { 371 return err 372 } 373 if blockBytes == nil { 374 return fmt.Errorf("block bytes for block num = [%d] should not be nil here. The indexes for the block are already present", 375 lastBlockIndexed) 376 } 377 } 378 379 //Should be at the last block already, but go ahead and loop looking for next blockBytes. 380 //If there is another block, add it to the index. 381 //This will ensure block indexes are correct, for example if peer had crashed before indexes got updated. 382 blockIdxInfo := &blockIdxInfo{} 383 for { 384 if blockBytes, blockPlacementInfo, err = stream.nextBlockBytesAndPlacementInfo(); err != nil { 385 return err 386 } 387 if blockBytes == nil { 388 break 389 } 390 info, err := extractSerializedBlockInfo(blockBytes) 391 if err != nil { 392 return err 393 } 394 395 //The blockStartOffset will get applied to the txOffsets prior to indexing within indexBlock(), 396 //therefore just shift by the difference between blockBytesOffset and blockStartOffset 397 numBytesToShift := int(blockPlacementInfo.blockBytesOffset - blockPlacementInfo.blockStartOffset) 398 for _, offset := range info.txOffsets { 399 offset.loc.offset += numBytesToShift 400 } 401 402 //Update the blockIndexInfo with what was actually stored in file system 403 blockIdxInfo.blockHash = info.blockHeader.Hash() 404 blockIdxInfo.blockNum = info.blockHeader.Number 405 blockIdxInfo.flp = &fileLocPointer{fileSuffixNum: blockPlacementInfo.fileNum, 406 locPointer: locPointer{offset: int(blockPlacementInfo.blockStartOffset)}} 407 blockIdxInfo.txOffsets = info.txOffsets 408 blockIdxInfo.metadata = info.metadata 409 410 logger.Debugf("syncIndex() indexing block [%d]", blockIdxInfo.blockNum) 411 if err = mgr.index.indexBlock(blockIdxInfo); err != nil { 412 return err 413 } 414 if blockIdxInfo.blockNum%10000 == 0 { 415 logger.Infof("Indexed block number [%d]", blockIdxInfo.blockNum) 416 } 417 } 418 logger.Infof("Finished building index. Last block indexed [%d]", blockIdxInfo.blockNum) 419 return nil 420 } 421 422 func (mgr *blockfileMgr) getBlockchainInfo() *common.BlockchainInfo { 423 return mgr.bcInfo.Load().(*common.BlockchainInfo) 424 } 425 426 func (mgr *blockfileMgr) updateCheckpoint(cpInfo *checkpointInfo) { 427 mgr.cpInfoCond.L.Lock() 428 defer mgr.cpInfoCond.L.Unlock() 429 mgr.cpInfo = cpInfo 430 logger.Debugf("Broadcasting about update checkpointInfo: %s", cpInfo) 431 mgr.cpInfoCond.Broadcast() 432 } 433 434 func (mgr *blockfileMgr) updateBlockchainInfo(latestBlockHash []byte, latestBlock *common.Block) { 435 currentBCInfo := mgr.getBlockchainInfo() 436 newBCInfo := &common.BlockchainInfo{ 437 Height: currentBCInfo.Height + 1, 438 CurrentBlockHash: latestBlockHash, 439 PreviousBlockHash: latestBlock.Header.PreviousHash} 440 441 mgr.bcInfo.Store(newBCInfo) 442 } 443 444 func (mgr *blockfileMgr) retrieveBlockByHash(blockHash []byte) (*common.Block, error) { 445 logger.Debugf("retrieveBlockByHash() - blockHash = [%#v]", blockHash) 446 loc, err := mgr.index.getBlockLocByHash(blockHash) 447 if err != nil { 448 return nil, err 449 } 450 return mgr.fetchBlock(loc) 451 } 452 453 func (mgr *blockfileMgr) retrieveBlockByNumber(blockNum uint64) (*common.Block, error) { 454 logger.Debugf("retrieveBlockByNumber() - blockNum = [%d]", blockNum) 455 456 // interpret math.MaxUint64 as a request for last block 457 if blockNum == math.MaxUint64 { 458 blockNum = mgr.getBlockchainInfo().Height - 1 459 } 460 461 loc, err := mgr.index.getBlockLocByBlockNum(blockNum) 462 if err != nil { 463 return nil, err 464 } 465 return mgr.fetchBlock(loc) 466 } 467 468 func (mgr *blockfileMgr) retrieveBlockByTxID(txID string) (*common.Block, error) { 469 logger.Debugf("retrieveBlockByTxID() - txID = [%s]", txID) 470 471 loc, err := mgr.index.getBlockLocByTxID(txID) 472 473 if err != nil { 474 return nil, err 475 } 476 return mgr.fetchBlock(loc) 477 } 478 479 func (mgr *blockfileMgr) retrieveTxValidationCodeByTxID(txID string) (peer.TxValidationCode, error) { 480 logger.Debugf("retrieveTxValidationCodeByTxID() - txID = [%s]", txID) 481 return mgr.index.getTxValidationCodeByTxID(txID) 482 } 483 484 func (mgr *blockfileMgr) retrieveBlockHeaderByNumber(blockNum uint64) (*common.BlockHeader, error) { 485 logger.Debugf("retrieveBlockHeaderByNumber() - blockNum = [%d]", blockNum) 486 loc, err := mgr.index.getBlockLocByBlockNum(blockNum) 487 if err != nil { 488 return nil, err 489 } 490 blockBytes, err := mgr.fetchBlockBytes(loc) 491 if err != nil { 492 return nil, err 493 } 494 info, err := extractSerializedBlockInfo(blockBytes) 495 if err != nil { 496 return nil, err 497 } 498 return info.blockHeader, nil 499 } 500 501 func (mgr *blockfileMgr) retrieveBlocks(startNum uint64) (*blocksItr, error) { 502 return newBlockItr(mgr, startNum), nil 503 } 504 505 func (mgr *blockfileMgr) retrieveTransactionByID(txID string) (*common.Envelope, error) { 506 logger.Debugf("retrieveTransactionByID() - txId = [%s]", txID) 507 loc, err := mgr.index.getTxLoc(txID) 508 if err != nil { 509 return nil, err 510 } 511 return mgr.fetchTransactionEnvelope(loc) 512 } 513 514 func (mgr *blockfileMgr) retrieveTransactionByBlockNumTranNum(blockNum uint64, tranNum uint64) (*common.Envelope, error) { 515 logger.Debugf("retrieveTransactionByBlockNumTranNum() - blockNum = [%d], tranNum = [%d]", blockNum, tranNum) 516 loc, err := mgr.index.getTXLocByBlockNumTranNum(blockNum, tranNum) 517 if err != nil { 518 return nil, err 519 } 520 return mgr.fetchTransactionEnvelope(loc) 521 } 522 523 func (mgr *blockfileMgr) fetchBlock(lp *fileLocPointer) (*common.Block, error) { 524 blockBytes, err := mgr.fetchBlockBytes(lp) 525 if err != nil { 526 return nil, err 527 } 528 block, err := deserializeBlock(blockBytes) 529 if err != nil { 530 return nil, err 531 } 532 return block, nil 533 } 534 535 func (mgr *blockfileMgr) fetchTransactionEnvelope(lp *fileLocPointer) (*common.Envelope, error) { 536 logger.Debugf("Entering fetchTransactionEnvelope() %v\n", lp) 537 var err error 538 var txEnvelopeBytes []byte 539 if txEnvelopeBytes, err = mgr.fetchRawBytes(lp); err != nil { 540 return nil, err 541 } 542 _, n := proto.DecodeVarint(txEnvelopeBytes) 543 return putil.GetEnvelopeFromBlock(txEnvelopeBytes[n:]) 544 } 545 546 func (mgr *blockfileMgr) fetchBlockBytes(lp *fileLocPointer) ([]byte, error) { 547 stream, err := newBlockfileStream(mgr.rootDir, lp.fileSuffixNum, int64(lp.offset)) 548 if err != nil { 549 return nil, err 550 } 551 defer stream.close() 552 b, err := stream.nextBlockBytes() 553 if err != nil { 554 return nil, err 555 } 556 return b, nil 557 } 558 559 func (mgr *blockfileMgr) fetchRawBytes(lp *fileLocPointer) ([]byte, error) { 560 filePath := deriveBlockfilePath(mgr.rootDir, lp.fileSuffixNum) 561 reader, err := newBlockfileReader(filePath) 562 if err != nil { 563 return nil, err 564 } 565 defer reader.close() 566 b, err := reader.read(lp.offset, lp.bytesLength) 567 if err != nil { 568 return nil, err 569 } 570 return b, nil 571 } 572 573 //Get the current checkpoint information that is stored in the database 574 func (mgr *blockfileMgr) loadCurrentInfo() (*checkpointInfo, error) { 575 var b []byte 576 var err error 577 if b, err = mgr.db.Get(blkMgrInfoKey); b == nil || err != nil { 578 return nil, err 579 } 580 i := &checkpointInfo{} 581 if err = i.unmarshal(b); err != nil { 582 return nil, err 583 } 584 logger.Debugf("loaded checkpointInfo:%s", i) 585 return i, nil 586 } 587 588 func (mgr *blockfileMgr) saveCurrentInfo(i *checkpointInfo, sync bool) error { 589 b, err := i.marshal() 590 if err != nil { 591 return err 592 } 593 if err = mgr.db.Put(blkMgrInfoKey, b, sync); err != nil { 594 return err 595 } 596 return nil 597 } 598 599 // scanForLastCompleteBlock scan a given block file and detects the last offset in the file 600 // after which there may lie a block partially written (towards the end of the file in a crash scenario). 601 func scanForLastCompleteBlock(rootDir string, fileNum int, startingOffset int64) ([]byte, int64, int, error) { 602 //scan the passed file number suffix starting from the passed offset to find the last completed block 603 numBlocks := 0 604 var lastBlockBytes []byte 605 blockStream, errOpen := newBlockfileStream(rootDir, fileNum, startingOffset) 606 if errOpen != nil { 607 return nil, 0, 0, errOpen 608 } 609 defer blockStream.close() 610 var errRead error 611 var blockBytes []byte 612 for { 613 blockBytes, errRead = blockStream.nextBlockBytes() 614 if blockBytes == nil || errRead != nil { 615 break 616 } 617 lastBlockBytes = blockBytes 618 numBlocks++ 619 } 620 if errRead == ErrUnexpectedEndOfBlockfile { 621 logger.Debugf(`Error:%s 622 The error may happen if a crash has happened during block appending. 623 Resetting error to nil and returning current offset as a last complete block's end offset`, errRead) 624 errRead = nil 625 } 626 logger.Debugf("scanForLastCompleteBlock(): last complete block ends at offset=[%d]", blockStream.currentOffset) 627 return lastBlockBytes, blockStream.currentOffset, numBlocks, errRead 628 } 629 630 // checkpointInfo 631 type checkpointInfo struct { 632 latestFileChunkSuffixNum int 633 latestFileChunksize int 634 isChainEmpty bool 635 lastBlockNumber uint64 636 } 637 638 func (i *checkpointInfo) marshal() ([]byte, error) { 639 buffer := proto.NewBuffer([]byte{}) 640 var err error 641 if err = buffer.EncodeVarint(uint64(i.latestFileChunkSuffixNum)); err != nil { 642 return nil, err 643 } 644 if err = buffer.EncodeVarint(uint64(i.latestFileChunksize)); err != nil { 645 return nil, err 646 } 647 if err = buffer.EncodeVarint(i.lastBlockNumber); err != nil { 648 return nil, err 649 } 650 var chainEmptyMarker uint64 651 if i.isChainEmpty { 652 chainEmptyMarker = 1 653 } 654 if err = buffer.EncodeVarint(chainEmptyMarker); err != nil { 655 return nil, err 656 } 657 return buffer.Bytes(), nil 658 } 659 660 func (i *checkpointInfo) unmarshal(b []byte) error { 661 buffer := proto.NewBuffer(b) 662 var val uint64 663 var chainEmptyMarker uint64 664 var err error 665 666 if val, err = buffer.DecodeVarint(); err != nil { 667 return err 668 } 669 i.latestFileChunkSuffixNum = int(val) 670 671 if val, err = buffer.DecodeVarint(); err != nil { 672 return err 673 } 674 i.latestFileChunksize = int(val) 675 676 if val, err = buffer.DecodeVarint(); err != nil { 677 return err 678 } 679 i.lastBlockNumber = val 680 if chainEmptyMarker, err = buffer.DecodeVarint(); err != nil { 681 return err 682 } 683 i.isChainEmpty = chainEmptyMarker == 1 684 return nil 685 } 686 687 func (i *checkpointInfo) String() string { 688 return fmt.Sprintf("latestFileChunkSuffixNum=[%d], latestFileChunksize=[%d], isChainEmpty=[%t], lastBlockNumber=[%d]", 689 i.latestFileChunkSuffixNum, i.latestFileChunksize, i.isChainEmpty, i.lastBlockNumber) 690 }