github.com/leonlxy/hyperledger@v1.0.0-alpha.0.20170427033203-34922035d248/common/ledger/blkstorage/fsblkstorage/blockfile_mgr.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

		 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fsblkstorage

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/ledger/blkstorage"
	"github.com/hyperledger/fabric/common/ledger/util"
	"github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
	"github.com/hyperledger/fabric/protos/common"
	"github.com/hyperledger/fabric/protos/peer"
	putil "github.com/hyperledger/fabric/protos/utils"
)

var logger = flogging.MustGetLogger("kvledger")

const (
	blockfilePrefix = "blockfile_"
)

var (
	blkMgrInfoKey = []byte("blkMgrInfo")
)

type conf struct {
	blockfilesDir    string
	maxBlockfileSize int
}

type blockfileMgr struct {
	rootDir           string
	conf              *Conf
	db                *leveldbhelper.DBHandle
	index             index
	cpInfo            *checkpointInfo
	cpInfoCond        *sync.Cond
	currentFileWriter *blockfileWriter
	bcInfo            atomic.Value
}

/*
Creates a new manager that will manage the files used for block persistence.
This manager manages the file system (FS) including
  -- the directory where the files are stored
  -- the individual files where the blocks are stored
  -- the checkpoint which tracks the latest file being persisted to
  -- the index which tracks what block and transaction is in what file
When a new blockfile manager is started (i.e. only on start-up), it checks
whether this start-up is the first time the system is coming up or a restart
of the system.

The blockfile manager stores blocks of data into a file system. That file
storage is done by creating sequentially numbered files of a configured size,
i.e. blockfile_000000, blockfile_000001, etc.

Each transaction in a block is stored with information about the number of
bytes in that transaction, for example:
  Adding txLoc [fileSuffixNum=0, offset=3, bytesLength=104] for tx [1:0] to index
  Adding txLoc [fileSuffixNum=0, offset=107, bytesLength=104] for tx [1:1] to index
Each block is stored with the total encoded length of that block as well as the
tx location offsets.

Remember that these steps are only done once at start-up of the system.
At start up a new manager:
  *) Checks if the directory for storing files exists, if not creates the dir
  *) Checks if the key value database exists, if not creates one
     (will create a db dir)
  *) Determines the checkpoint information (cpinfo) used for storage
     -- Loads cpinfo from the db if it exists, if not instantiates a new cpinfo
     -- If cpinfo was loaded from the db, compares it to the FS
     -- If cpinfo and the file system are not in sync, syncs cpinfo from the FS
  *) Starts a new file writer
     -- truncates the file per cpinfo to remove any excess past the last block
  *) Determines the index information used to find tx and blocks in
     the file blkstorage
     -- Instantiates a new blockIdxInfo
     -- Loads the index from the db if it exists
     -- syncIndex compares the last block indexed to what is in the FS
     -- If the index and the file system are not in sync, syncs the index from the FS
  *) Updates blockchain info used by the APIs
*/
func newBlockfileMgr(id string, conf *Conf, indexConfig *blkstorage.IndexConfig, indexStore *leveldbhelper.DBHandle) *blockfileMgr {
	logger.Debugf("newBlockfileMgr() initializing file-based block storage for ledger: %s ", id)
	//Determine the root directory for the blockfile storage, if it does not exist create it
	rootDir := conf.getLedgerBlockDir(id)
	_, err := util.CreateDirIfMissing(rootDir)
	if err != nil {
		panic(fmt.Sprintf("Error: %s", err))
	}
	// Instantiate the manager, i.e. the blockfileMgr structure
	mgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: indexStore}

	// cp = checkpointInfo, retrieve from the database the suffix (number) of the file that blocks were last stored to.
	// It also retrieves the current size of that file and the last block number that was written to that file.
	// At init checkpointInfo: latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0]
	cpInfo, err := mgr.loadCurrentInfo()
	if err != nil {
		panic(fmt.Sprintf("Could not get block file info for current block file from db: %s", err))
	}
	if cpInfo == nil { //if no cpInfo stored in db, initialize it to zero
		cpInfo = &checkpointInfo{0, 0, true, 0}
		err = mgr.saveCurrentInfo(cpInfo, true)
		if err != nil {
			panic(fmt.Sprintf("Could not save next block file info to db: %s", err))
		}
	}
	//Verify that the checkpoint stored in the db is consistent with what is actually stored in the block file system.
	// If not the same, sync the cpInfo and the file system
	syncCPInfoFromFS(rootDir, cpInfo)
	//Open a writer to the file identified by the number and truncate it to only contain the latest block
	// that was completely saved (file system, index, cpinfo, etc)
	currentFileWriter, err := newBlockfileWriter(deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum))
	if err != nil {
		panic(fmt.Sprintf("Could not open writer to current file: %s", err))
	}
	//Truncate the file to remove any excess past the last block
	err = currentFileWriter.truncateFile(cpInfo.latestFileChunksize)
	if err != nil {
		panic(fmt.Sprintf("Could not truncate current file to known size in db: %s", err))
	}

	// Create a new KeyValue store database handler for the blocks index in the keyvalue database
	mgr.index = newBlockIndex(indexConfig, indexStore)

	// Update the manager with the checkpoint info and the file writer
	mgr.cpInfo = cpInfo
	mgr.currentFileWriter = currentFileWriter
	// Create a checkpoint condition (event) variable, for the goroutines waiting for
	// or announcing the occurrence of an event.
	mgr.cpInfoCond = sync.NewCond(&sync.Mutex{})

	// Verify that the index stored in the db is consistent with what is actually stored in the block file system.
	// If not the same, sync the index and the file system
	mgr.syncIndex()

	// init BlockchainInfo for external API's
	bcInfo := &common.BlockchainInfo{
		Height:            0,
		CurrentBlockHash:  nil,
		PreviousBlockHash: nil}

	//If start up is a restart of an existing storage, update BlockchainInfo for external API's
	if !cpInfo.isChainEmpty {
		lastBlockHeader, err := mgr.retrieveBlockHeaderByNumber(cpInfo.lastBlockNumber)
		if err != nil {
			panic(fmt.Sprintf("Could not retrieve header of the last block from file: %s", err))
		}
		lastBlockHash := lastBlockHeader.Hash()
		previousBlockHash := lastBlockHeader.PreviousHash
		bcInfo = &common.BlockchainInfo{
			Height:            cpInfo.lastBlockNumber + 1,
			CurrentBlockHash:  lastBlockHash,
			PreviousBlockHash: previousBlockHash}
	}
	mgr.bcInfo.Store(bcInfo)
	//return the new manager (blockfileMgr)
	return mgr
}
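
// The constructor above derives the externally visible chain height from the
// checkpoint: an empty chain reports height 0, otherwise height is the last
// block number plus one. A minimal sketch of that rule, as a hypothetical
// helper (heightFromCheckpoint is illustrative and not part of this file's API):
func heightFromCheckpoint(cpInfo *checkpointInfo) uint64 {
	if cpInfo.isChainEmpty {
		// no blocks persisted yet
		return 0
	}
	// block numbers start at 0, so height = lastBlockNumber + 1
	return cpInfo.lastBlockNumber + 1
}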
//cp = checkpointInfo; from the database it gets the suffix and the size of
// the file where the last block was written. It also contains the
// last block number that was written. At init
// checkpointInfo: latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0]
func syncCPInfoFromFS(rootDir string, cpInfo *checkpointInfo) {
	logger.Debugf("Starting checkpoint=%s", cpInfo)
	//Checks if the file with the suffix where the last block was written exists
	filePath := deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum)
	exists, size, err := util.FileExists(filePath)
	if err != nil {
		panic(fmt.Sprintf("Error in checking whether file [%s] exists: %s", filePath, err))
	}
	logger.Debugf("status of file [%s]: exists=[%t], size=[%d]", filePath, exists, size)
	//The test is on !exists because when a file number is first used, the file does not exist yet.
	//Otherwise, check that the size of the file matches what is stored in cpinfo, e.g.
	//status of file [/tmp/tests/ledger/blkstorage/fsblkstorage/blocks/blockfile_000000]: exists=[false], size=[0]
	if !exists || int(size) == cpInfo.latestFileChunksize {
		// checkpoint info is in sync with the file on disk
		return
	}
	//Scan the file system to verify that the checkpoint info stored in the db is correct
	endOffsetLastBlock, numBlocks, err := scanForLastCompleteBlock(
		rootDir, cpInfo.latestFileChunkSuffixNum, int64(cpInfo.latestFileChunksize))
	if err != nil {
		panic(fmt.Sprintf("Could not open current file for detecting last block in the file: %s", err))
	}
	cpInfo.latestFileChunksize = int(endOffsetLastBlock)
	if numBlocks == 0 {
		return
	}
	//Update the checkpoint info with the actual last block number stored and its end location
	if cpInfo.isChainEmpty {
		cpInfo.lastBlockNumber = uint64(numBlocks - 1)
	} else {
		cpInfo.lastBlockNumber += uint64(numBlocks)
	}
	cpInfo.isChainEmpty = false
	logger.Debugf("Checkpoint after updates by scanning the last file segment:%s", cpInfo)
}

func deriveBlockfilePath(rootDir string, suffixNum int) string {
	return rootDir + "/" + blockfilePrefix + fmt.Sprintf("%06d", suffixNum)
}

func (mgr *blockfileMgr) open() error {
	return mgr.currentFileWriter.open()
}

func (mgr *blockfileMgr) close() {
	mgr.currentFileWriter.close()
}

func (mgr *blockfileMgr) moveToNextFile() {
	cpInfo := &checkpointInfo{
		latestFileChunkSuffixNum: mgr.cpInfo.latestFileChunkSuffixNum + 1,
		latestFileChunksize:      0,
		lastBlockNumber:          mgr.cpInfo.lastBlockNumber}

	nextFileWriter, err := newBlockfileWriter(
		deriveBlockfilePath(mgr.rootDir, cpInfo.latestFileChunkSuffixNum))

	if err != nil {
		panic(fmt.Sprintf("Could not open writer to next file: %s", err))
	}
	mgr.currentFileWriter.close()
	err = mgr.saveCurrentInfo(cpInfo, true)
	if err != nil {
		panic(fmt.Sprintf("Could not save next block file info to db: %s", err))
	}
	mgr.currentFileWriter = nextFileWriter
	mgr.updateCheckpoint(cpInfo)
}
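
// moveToNextFile is triggered from addBlock below when appending a block would
// overflow the configured maxBlockfileSize. A hypothetical sketch of that
// decision and of the resulting file name (wouldOverflow and nextFilePath are
// illustrative and not part of this file's API):
func wouldOverflow(mgr *blockfileMgr, bytesToAppend int) bool {
	// same comparison as addBlock: current file size plus the framed block exceeds the cap
	return mgr.cpInfo.latestFileChunksize+bytesToAppend > mgr.conf.maxBlockfileSize
}

func nextFilePath(mgr *blockfileMgr) string {
	// e.g. suffix 1 yields "<rootDir>/blockfile_000001"
	return deriveBlockfilePath(mgr.rootDir, mgr.cpInfo.latestFileChunkSuffixNum+1)
}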
func (mgr *blockfileMgr) addBlock(block *common.Block) error {
	if block.Header.Number != mgr.getBlockchainInfo().Height {
		return fmt.Errorf("Block number should have been %d but was %d", mgr.getBlockchainInfo().Height, block.Header.Number)
	}
	blockBytes, info, err := serializeBlock(block)
	if err != nil {
		return fmt.Errorf("Error while serializing block: %s", err)
	}
	blockHash := block.Header.Hash()
	//Get the location / offset where each transaction starts in the block and where the block ends
	txOffsets := info.txOffsets
	currentOffset := mgr.cpInfo.latestFileChunksize
	if err != nil {
		return fmt.Errorf("Error while serializing block: %s", err)
	}
	blockBytesLen := len(blockBytes)
	blockBytesEncodedLen := proto.EncodeVarint(uint64(blockBytesLen))
	totalBytesToAppend := blockBytesLen + len(blockBytesEncodedLen)

	//Determine if we need to start a new file since the size of this block
	//exceeds the amount of space left in the current file
	if currentOffset+totalBytesToAppend > mgr.conf.maxBlockfileSize {
		mgr.moveToNextFile()
		currentOffset = 0
	}
	//append blockBytesEncodedLen to the file
	err = mgr.currentFileWriter.append(blockBytesEncodedLen, false)
	if err == nil {
		//append the actual block bytes to the file
		err = mgr.currentFileWriter.append(blockBytes, true)
	}
	if err != nil {
		truncateErr := mgr.currentFileWriter.truncateFile(mgr.cpInfo.latestFileChunksize)
		if truncateErr != nil {
			panic(fmt.Sprintf("Could not truncate current file to known size after an error during block append: %s", err))
		}
		return fmt.Errorf("Error while appending block to file: %s", err)
	}

	//Update the checkpoint info with the results of adding the new block
	currentCPInfo := mgr.cpInfo
	newCPInfo := &checkpointInfo{
		latestFileChunkSuffixNum: currentCPInfo.latestFileChunkSuffixNum,
		latestFileChunksize:      currentCPInfo.latestFileChunksize + totalBytesToAppend,
		isChainEmpty:             false,
		lastBlockNumber:          block.Header.Number}
	//save the checkpoint information in the database
	if err = mgr.saveCurrentInfo(newCPInfo, false); err != nil {
		truncateErr := mgr.currentFileWriter.truncateFile(currentCPInfo.latestFileChunksize)
		if truncateErr != nil {
			panic(fmt.Sprintf("Error in truncating current file to known size after an error in saving checkpoint info: %s", err))
		}
		return fmt.Errorf("Error while saving current file info to db: %s", err)
	}

	//Index block file location pointer updated with file suffix and offset for the new block
	blockFLP := &fileLocPointer{fileSuffixNum: newCPInfo.latestFileChunkSuffixNum}
	blockFLP.offset = currentOffset
	// shift the txoffset because we prepend length of bytes before block bytes
	for _, txOffset := range txOffsets {
		txOffset.loc.offset += len(blockBytesEncodedLen)
	}
	//save the index in the database
	mgr.index.indexBlock(&blockIdxInfo{
		blockNum: block.Header.Number, blockHash: blockHash,
		flp: blockFLP, txOffsets: txOffsets, metadata: block.Metadata})

	//update the checkpoint info (for storage) and the blockchain info (for APIs) in the manager
	mgr.updateCheckpoint(newCPInfo)
	mgr.updateBlockchainInfo(blockHash, block)
	return nil
}
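
// addBlock frames every block on disk as a proto varint length prefix followed
// by the serialized block bytes; the tx offsets recorded in the index are
// shifted by the prefix length so they point into the file rather than into the
// block. A self-contained sketch of that framing (frameBlock is hypothetical):
func frameBlock(blockBytes []byte) (framed []byte, prefixLen int) {
	lenBytes := proto.EncodeVarint(uint64(len(blockBytes)))
	// the bytes written to the block file: <varint length><block bytes>
	framed = append(lenBytes, blockBytes...)
	// tx offsets inside blockBytes move forward by this many bytes on disk
	return framed, len(lenBytes)
}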
func (mgr *blockfileMgr) syncIndex() error {
	var lastBlockIndexed uint64
	var indexEmpty bool
	var err error
	//from the database, get the last block that was indexed
	if lastBlockIndexed, err = mgr.index.getLastBlockIndexed(); err != nil {
		if err != errIndexEmpty {
			return err
		}
		indexEmpty = true
	}
	//initialize index to file number:zero, offset:zero and blockNum:0
	startFileNum := 0
	startOffset := 0
	blockNum := uint64(0)
	skipFirstBlock := false
	//get the last file that blocks were added to using the checkpoint info
	endFileNum := mgr.cpInfo.latestFileChunkSuffixNum
	//if the index stored in the db has a value, update the index information with those values
	if !indexEmpty {
		var flp *fileLocPointer
		if flp, err = mgr.index.getBlockLocByBlockNum(lastBlockIndexed); err != nil {
			return err
		}
		startFileNum = flp.fileSuffixNum
		startOffset = flp.locPointer.offset
		blockNum = lastBlockIndexed
		skipFirstBlock = true
	}

	//open a blockstream to the file location that was stored in the index
	var stream *blockStream
	if stream, err = newBlockStream(mgr.rootDir, startFileNum, int64(startOffset), endFileNum); err != nil {
		return err
	}
	var blockBytes []byte
	var blockPlacementInfo *blockPlacementInfo

	if skipFirstBlock {
		if blockBytes, _, err = stream.nextBlockBytesAndPlacementInfo(); err != nil {
			return err
		}
		if blockBytes == nil {
			return fmt.Errorf("block bytes for block num = [%d] should not be nil here. The indexes for the block are already present",
				lastBlockIndexed)
		}
	}

	//Should be at the last block already, but go ahead and loop looking for the next blockBytes.
	//If there is another block, add it to the index.
	//This ensures block indexes are correct, for example if the peer had crashed before the indexes were updated.
	for {
		if blockBytes, blockPlacementInfo, err = stream.nextBlockBytesAndPlacementInfo(); err != nil {
			return err
		}
		if blockBytes == nil {
			break
		}
		info, err := extractSerializedBlockInfo(blockBytes)
		if err != nil {
			return err
		}

		//The blockStartOffset will get applied to the txOffsets prior to indexing within indexBlock(),
		//therefore just shift by the difference between blockBytesOffset and blockStartOffset
		numBytesToShift := int(blockPlacementInfo.blockBytesOffset - blockPlacementInfo.blockStartOffset)
		for _, offset := range info.txOffsets {
			offset.loc.offset += numBytesToShift
		}

		//Update the blockIndexInfo with what was actually stored in the file system
		blockIdxInfo := &blockIdxInfo{}
		blockIdxInfo.blockHash = info.blockHeader.Hash()
		blockIdxInfo.blockNum = info.blockHeader.Number
		blockIdxInfo.flp = &fileLocPointer{fileSuffixNum: blockPlacementInfo.fileNum,
			locPointer: locPointer{offset: int(blockPlacementInfo.blockStartOffset)}}
		blockIdxInfo.txOffsets = info.txOffsets
		blockIdxInfo.metadata = info.metadata

		logger.Debugf("syncIndex() indexing block [%d]", blockIdxInfo.blockNum)
		if err = mgr.index.indexBlock(blockIdxInfo); err != nil {
			return err
		}
		blockNum++
	}
	return nil
}

func (mgr *blockfileMgr) getBlockchainInfo() *common.BlockchainInfo {
	return mgr.bcInfo.Load().(*common.BlockchainInfo)
}

func (mgr *blockfileMgr) updateCheckpoint(cpInfo *checkpointInfo) {
	mgr.cpInfoCond.L.Lock()
	defer mgr.cpInfoCond.L.Unlock()
	mgr.cpInfo = cpInfo
	logger.Debugf("Broadcasting about update checkpointInfo: %s", cpInfo)
	mgr.cpInfoCond.Broadcast()
}
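
// updateCheckpoint broadcasts on cpInfoCond so that readers waiting for a block
// that has not been committed yet can re-check the checkpoint. A hedged sketch
// of the consumer side of that condition variable (waitForBlockNum is
// hypothetical and not defined in this file):
func waitForBlockNum(mgr *blockfileMgr, blockNum uint64) {
	mgr.cpInfoCond.L.Lock()
	defer mgr.cpInfoCond.L.Unlock()
	// standard sync.Cond pattern: re-test the predicate after every wakeup
	for mgr.cpInfo.isChainEmpty || mgr.cpInfo.lastBlockNumber < blockNum {
		mgr.cpInfoCond.Wait()
	}
}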
func (mgr *blockfileMgr) updateBlockchainInfo(latestBlockHash []byte, latestBlock *common.Block) {
	currentBCInfo := mgr.getBlockchainInfo()
	newBCInfo := &common.BlockchainInfo{
		Height:            currentBCInfo.Height + 1,
		CurrentBlockHash:  latestBlockHash,
		PreviousBlockHash: latestBlock.Header.PreviousHash}

	mgr.bcInfo.Store(newBCInfo)
}

func (mgr *blockfileMgr) retrieveBlockByHash(blockHash []byte) (*common.Block, error) {
	logger.Debugf("retrieveBlockByHash() - blockHash = [%#v]", blockHash)
	loc, err := mgr.index.getBlockLocByHash(blockHash)
	if err != nil {
		return nil, err
	}
	return mgr.fetchBlock(loc)
}

func (mgr *blockfileMgr) retrieveBlockByNumber(blockNum uint64) (*common.Block, error) {
	logger.Debugf("retrieveBlockByNumber() - blockNum = [%d]", blockNum)

	// interpret math.MaxUint64 as a request for last block
	if blockNum == math.MaxUint64 {
		blockNum = mgr.getBlockchainInfo().Height - 1
	}

	loc, err := mgr.index.getBlockLocByBlockNum(blockNum)
	if err != nil {
		return nil, err
	}
	return mgr.fetchBlock(loc)
}

func (mgr *blockfileMgr) retrieveBlockByTxID(txID string) (*common.Block, error) {
	logger.Debugf("retrieveBlockByTxID() - txID = [%s]", txID)

	loc, err := mgr.index.getBlockLocByTxID(txID)

	if err != nil {
		return nil, err
	}
	return mgr.fetchBlock(loc)
}

func (mgr *blockfileMgr) retrieveTxValidationCodeByTxID(txID string) (peer.TxValidationCode, error) {
	logger.Debugf("retrieveTxValidationCodeByTxID() - txID = [%s]", txID)
	return mgr.index.getTxValidationCodeByTxID(txID)
}
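
// retrieveBlockByNumber treats math.MaxUint64 as a request for the newest block.
// A hypothetical caller-side sketch (retrieveNewestBlock is illustrative only):
func retrieveNewestBlock(mgr *blockfileMgr) (*common.Block, error) {
	// equivalent to asking for blockNum = Height - 1
	return mgr.retrieveBlockByNumber(math.MaxUint64)
}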
logger.Debugf("retrieveBlockHeaderByNumber() - blockNum = [%d]", blockNum) 478 loc, err := mgr.index.getBlockLocByBlockNum(blockNum) 479 if err != nil { 480 return nil, err 481 } 482 blockBytes, err := mgr.fetchBlockBytes(loc) 483 if err != nil { 484 return nil, err 485 } 486 info, err := extractSerializedBlockInfo(blockBytes) 487 if err != nil { 488 return nil, err 489 } 490 return info.blockHeader, nil 491 } 492 493 func (mgr *blockfileMgr) retrieveBlocks(startNum uint64) (*blocksItr, error) { 494 return newBlockItr(mgr, startNum), nil 495 } 496 497 func (mgr *blockfileMgr) retrieveTransactionByID(txID string) (*common.Envelope, error) { 498 logger.Debugf("retrieveTransactionByID() - txId = [%s]", txID) 499 loc, err := mgr.index.getTxLoc(txID) 500 if err != nil { 501 return nil, err 502 } 503 return mgr.fetchTransactionEnvelope(loc) 504 } 505 506 func (mgr *blockfileMgr) retrieveTransactionByBlockNumTranNum(blockNum uint64, tranNum uint64) (*common.Envelope, error) { 507 logger.Debugf("retrieveTransactionByBlockNumTranNum() - blockNum = [%d], tranNum = [%d]", blockNum, tranNum) 508 loc, err := mgr.index.getTXLocByBlockNumTranNum(blockNum, tranNum) 509 if err != nil { 510 return nil, err 511 } 512 return mgr.fetchTransactionEnvelope(loc) 513 } 514 515 func (mgr *blockfileMgr) fetchBlock(lp *fileLocPointer) (*common.Block, error) { 516 blockBytes, err := mgr.fetchBlockBytes(lp) 517 if err != nil { 518 return nil, err 519 } 520 block, err := deserializeBlock(blockBytes) 521 if err != nil { 522 return nil, err 523 } 524 return block, nil 525 } 526 527 func (mgr *blockfileMgr) fetchTransactionEnvelope(lp *fileLocPointer) (*common.Envelope, error) { 528 logger.Debugf("Entering fetchTransactionEnvelope() %v\n", lp) 529 var err error 530 var txEnvelopeBytes []byte 531 if txEnvelopeBytes, err = mgr.fetchRawBytes(lp); err != nil { 532 return nil, err 533 } 534 _, n := proto.DecodeVarint(txEnvelopeBytes) 535 return putil.GetEnvelopeFromBlock(txEnvelopeBytes[n:]) 536 } 537 538 func (mgr *blockfileMgr) fetchBlockBytes(lp *fileLocPointer) ([]byte, error) { 539 stream, err := newBlockfileStream(mgr.rootDir, lp.fileSuffixNum, int64(lp.offset)) 540 if err != nil { 541 return nil, err 542 } 543 defer stream.close() 544 b, err := stream.nextBlockBytes() 545 if err != nil { 546 return nil, err 547 } 548 return b, nil 549 } 550 551 func (mgr *blockfileMgr) fetchRawBytes(lp *fileLocPointer) ([]byte, error) { 552 filePath := deriveBlockfilePath(mgr.rootDir, lp.fileSuffixNum) 553 reader, err := newBlockfileReader(filePath) 554 if err != nil { 555 return nil, err 556 } 557 defer reader.close() 558 b, err := reader.read(lp.offset, lp.bytesLength) 559 if err != nil { 560 return nil, err 561 } 562 return b, nil 563 } 564 565 //Get the current checkpoint information that is stored in the database 566 func (mgr *blockfileMgr) loadCurrentInfo() (*checkpointInfo, error) { 567 var b []byte 568 var err error 569 if b, err = mgr.db.Get(blkMgrInfoKey); b == nil || err != nil { 570 return nil, err 571 } 572 i := &checkpointInfo{} 573 if err = i.unmarshal(b); err != nil { 574 return nil, err 575 } 576 logger.Debugf("loaded checkpointInfo:%s", i) 577 return i, nil 578 } 579 580 func (mgr *blockfileMgr) saveCurrentInfo(i *checkpointInfo, sync bool) error { 581 b, err := i.marshal() 582 if err != nil { 583 return err 584 } 585 if err = mgr.db.Put(blkMgrInfoKey, b, sync); err != nil { 586 return err 587 } 588 return nil 589 } 590 591 // scanForLastCompleteBlock scan a given block file and detects the last offset in the file 592 
//Get the current checkpoint information that is stored in the database
func (mgr *blockfileMgr) loadCurrentInfo() (*checkpointInfo, error) {
	var b []byte
	var err error
	if b, err = mgr.db.Get(blkMgrInfoKey); b == nil || err != nil {
		return nil, err
	}
	i := &checkpointInfo{}
	if err = i.unmarshal(b); err != nil {
		return nil, err
	}
	logger.Debugf("loaded checkpointInfo:%s", i)
	return i, nil
}

func (mgr *blockfileMgr) saveCurrentInfo(i *checkpointInfo, sync bool) error {
	b, err := i.marshal()
	if err != nil {
		return err
	}
	if err = mgr.db.Put(blkMgrInfoKey, b, sync); err != nil {
		return err
	}
	return nil
}

// scanForLastCompleteBlock scans a given block file and detects the last offset in the file
// after which there may lie a partially written block (towards the end of the file in a crash scenario).
func scanForLastCompleteBlock(rootDir string, fileNum int, startingOffset int64) (int64, int, error) {
	//scan the file with the passed suffix number, starting from the passed offset, to find the last complete block
	numBlocks := 0
	blockStream, errOpen := newBlockfileStream(rootDir, fileNum, startingOffset)
	if errOpen != nil {
		return 0, 0, errOpen
	}
	defer blockStream.close()
	var errRead error
	var blockBytes []byte
	for {
		blockBytes, errRead = blockStream.nextBlockBytes()
		if blockBytes == nil || errRead != nil {
			break
		}
		numBlocks++
	}
	if errRead == ErrUnexpectedEndOfBlockfile {
		logger.Debugf(`Error:%s
		The error may happen if a crash has happened during block appending.
		Resetting error to nil and returning current offset as a last complete block's end offset`, errRead)
		errRead = nil
	}
	logger.Debugf("scanForLastCompleteBlock(): last complete block ends at offset=[%d]", blockStream.currentOffset)
	return blockStream.currentOffset, numBlocks, errRead
}

// checkpointInfo
type checkpointInfo struct {
	latestFileChunkSuffixNum int
	latestFileChunksize      int
	isChainEmpty             bool
	lastBlockNumber          uint64
}

func (i *checkpointInfo) marshal() ([]byte, error) {
	buffer := proto.NewBuffer([]byte{})
	var err error
	if err = buffer.EncodeVarint(uint64(i.latestFileChunkSuffixNum)); err != nil {
		return nil, err
	}
	if err = buffer.EncodeVarint(uint64(i.latestFileChunksize)); err != nil {
		return nil, err
	}
	if err = buffer.EncodeVarint(i.lastBlockNumber); err != nil {
		return nil, err
	}
	var chainEmptyMarker uint64
	if i.isChainEmpty {
		chainEmptyMarker = 1
	}
	if err = buffer.EncodeVarint(chainEmptyMarker); err != nil {
		return nil, err
	}
	return buffer.Bytes(), nil
}

func (i *checkpointInfo) unmarshal(b []byte) error {
	buffer := proto.NewBuffer(b)
	var val uint64
	var chainEmptyMarker uint64
	var err error

	if val, err = buffer.DecodeVarint(); err != nil {
		return err
	}
	i.latestFileChunkSuffixNum = int(val)

	if val, err = buffer.DecodeVarint(); err != nil {
		return err
	}
	i.latestFileChunksize = int(val)

	if val, err = buffer.DecodeVarint(); err != nil {
		return err
	}
	i.lastBlockNumber = val
	if chainEmptyMarker, err = buffer.DecodeVarint(); err != nil {
		return err
	}
	i.isChainEmpty = chainEmptyMarker == 1
	return nil
}

func (i *checkpointInfo) String() string {
	return fmt.Sprintf("latestFileChunkSuffixNum=[%d], latestFileChunksize=[%d], isChainEmpty=[%t], lastBlockNumber=[%d]",
		i.latestFileChunkSuffixNum, i.latestFileChunksize, i.isChainEmpty, i.lastBlockNumber)
}
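
// checkpointInfo is persisted under blkMgrInfoKey as four varints:
// latestFileChunkSuffixNum, latestFileChunksize, lastBlockNumber, and a 0/1
// chain-empty marker. A hedged round-trip sketch of that encoding
// (checkpointRoundTrip is illustrative, not part of this file's API):
func checkpointRoundTrip() error {
	in := &checkpointInfo{latestFileChunkSuffixNum: 2, latestFileChunksize: 4096, isChainEmpty: false, lastBlockNumber: 41}
	b, err := in.marshal()
	if err != nil {
		return err
	}
	out := &checkpointInfo{}
	if err := out.unmarshal(b); err != nil {
		return err
	}
	// out now mirrors in, e.g. out.lastBlockNumber == 41 and out.isChainEmpty == false
	logger.Debugf("round-tripped checkpointInfo: %s", out)
	return nil
}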