github.com/sykesm/fabric@v1.1.0-preview.0.20200129034918-2aa12b1a0181/common/ledger/blkstorage/fsblkstorage/block_stream.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package fsblkstorage

import (
	"bufio"
	"fmt"
	"io"
	"os"

	"github.com/golang/protobuf/proto"
	"github.com/pkg/errors"
)

// ErrUnexpectedEndOfBlockfile indicates an unexpected end of a file segment.
// This can happen mainly if a crash occurs while appending a block and partial
// block contents get written towards the end of the file.
var ErrUnexpectedEndOfBlockfile = errors.New("unexpected end of blockfile")

// blockfileStream reads blocks sequentially from a single file.
// It starts from the given offset and can traverse till the end of the file.
type blockfileStream struct {
	fileNum       int
	file          *os.File
	reader        *bufio.Reader
	currentOffset int64
}

// blockStream reads blocks sequentially from multiple files.
// It starts from a given file offset and continues with the next
// file segment until the end of the last segment (`endFileNum`).
type blockStream struct {
	rootDir           string
	currentFileNum    int
	endFileNum        int
	currentFileStream *blockfileStream
}

// blockPlacementInfo captures the information related
// to a block's placement in the file.
type blockPlacementInfo struct {
	fileNum          int
	blockStartOffset int64
	blockBytesOffset int64
}

///////////////////////////////////
// blockfileStream functions
///////////////////////////////////

func newBlockfileStream(rootDir string, fileNum int, startOffset int64) (*blockfileStream, error) {
	filePath := deriveBlockfilePath(rootDir, fileNum)
	logger.Debugf("newBlockfileStream(): filePath=[%s], startOffset=[%d]", filePath, startOffset)
	var file *os.File
	var err error
	if file, err = os.OpenFile(filePath, os.O_RDONLY, 0600); err != nil {
		return nil, errors.Wrapf(err, "error opening block file %s", filePath)
	}
	var newPosition int64
	if newPosition, err = file.Seek(startOffset, 0); err != nil {
		return nil, errors.Wrapf(err, "error seeking block file [%s] to startOffset [%d]", filePath, startOffset)
	}
	if newPosition != startOffset {
		panic(fmt.Sprintf("Could not seek block file [%s] to startOffset [%d]. New position = [%d]",
			filePath, startOffset, newPosition))
	}
	s := &blockfileStream{fileNum, file, bufio.NewReader(file), startOffset}
	return s, nil
}

// nextBlockBytes returns the bytes for the next block, discarding the placement information.
func (s *blockfileStream) nextBlockBytes() ([]byte, error) {
	blockBytes, _, err := s.nextBlockBytesAndPlacementInfo()
	return blockBytes, err
}
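// The on-disk layout consumed by nextBlockBytesAndPlacementInfo is a sequence of
// frames, each a proto varint length prefix followed by that many block bytes.
// The sketch below is illustrative only (appendBlockFrame is not part of the
// original file); it shows how a writer would produce a frame that this stream
// can parse back with proto.DecodeVarint, Discard, and ReadAtLeast.
func appendBlockFrame(buf []byte, blockBytes []byte) []byte {
	// varint-encoded length of the serialized block, as peeked by the reader
	buf = append(buf, proto.EncodeVarint(uint64(len(blockBytes)))...)
	// the serialized block bytes themselves
	return append(buf, blockBytes...)
}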
// nextBlockBytesAndPlacementInfo returns the bytes for the next block
// along with the offset information in the block file.
// An error `ErrUnexpectedEndOfBlockfile` is returned if partially written data is detected,
// which is possible towards the tail of the file if a crash took place while appending a block.
func (s *blockfileStream) nextBlockBytesAndPlacementInfo() ([]byte, *blockPlacementInfo, error) {
	var lenBytes []byte
	var err error
	var fileInfo os.FileInfo
	moreContentAvailable := true

	if fileInfo, err = s.file.Stat(); err != nil {
		return nil, nil, errors.Wrapf(err, "error getting block file stat")
	}
	if s.currentOffset == fileInfo.Size() {
		logger.Debugf("Finished reading file number [%d]", s.fileNum)
		return nil, nil, nil
	}
	remainingBytes := fileInfo.Size() - s.currentOffset
	// Peek 8 bytes, or fewer if the remaining bytes are less than 8.
	// The assumption is that a block size is small enough to be represented in an 8-byte varint.
	peekBytes := 8
	if remainingBytes < int64(peekBytes) {
		peekBytes = int(remainingBytes)
		moreContentAvailable = false
	}
	logger.Debugf("Remaining bytes=[%d], Going to peek [%d] bytes", remainingBytes, peekBytes)
	if lenBytes, err = s.reader.Peek(peekBytes); err != nil {
		return nil, nil, errors.Wrapf(err, "error peeking [%d] bytes from block file", peekBytes)
	}
	length, n := proto.DecodeVarint(lenBytes)
	if n == 0 {
		// proto.DecodeVarint did not consume any bytes at all, which means that the bytes
		// representing the size of the block are partial
		if !moreContentAvailable {
			return nil, nil, ErrUnexpectedEndOfBlockfile
		}
		panic(errors.Errorf("Error in decoding varint bytes [%#v]", lenBytes))
	}
	bytesExpected := int64(n) + int64(length)
	if bytesExpected > remainingBytes {
		logger.Debugf("At least [%d] bytes expected. Remaining bytes = [%d]. Returning with error [%s]",
			bytesExpected, remainingBytes, ErrUnexpectedEndOfBlockfile)
		return nil, nil, ErrUnexpectedEndOfBlockfile
	}
	// skip the bytes representing the block size
	if _, err = s.reader.Discard(n); err != nil {
		return nil, nil, errors.Wrapf(err, "error discarding [%d] bytes", n)
	}
	blockBytes := make([]byte, length)
	if _, err = io.ReadAtLeast(s.reader, blockBytes, int(length)); err != nil {
		logger.Errorf("Error reading [%d] bytes from file number [%d], error: %s", length, s.fileNum, err)
		return nil, nil, errors.Wrapf(err, "error reading [%d] bytes from file number [%d]", length, s.fileNum)
	}
	blockPlacementInfo := &blockPlacementInfo{
		fileNum:          s.fileNum,
		blockStartOffset: s.currentOffset,
		blockBytesOffset: s.currentOffset + int64(n)}
	s.currentOffset += int64(n) + int64(length)
	logger.Debugf("Returning blockbytes - length=[%d], placementInfo={%s}", len(blockBytes), blockPlacementInfo)
	return blockBytes, blockPlacementInfo, nil
}

// close releases the underlying file handle.
func (s *blockfileStream) close() error {
	return errors.WithStack(s.file.Close())
}
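// scanBlockfile is an illustrative sketch, not part of the original file, showing how a
// caller might drain a single blockfileStream. nextBlockBytes returns nil bytes at a
// clean end of file, and ErrUnexpectedEndOfBlockfile when a partially written block is
// found at the tail (e.g. after a crash while appending).
func scanBlockfile(rootDir string, fileNum int) (int, error) {
	stream, err := newBlockfileStream(rootDir, fileNum, 0)
	if err != nil {
		return 0, err
	}
	defer stream.close()
	numBlocks := 0
	for {
		blockBytes, err := stream.nextBlockBytes()
		if err != nil {
			// includes ErrUnexpectedEndOfBlockfile; callers typically truncate the
			// file at the last good offset in that case
			return numBlocks, err
		}
		if blockBytes == nil {
			// clean end of file
			return numBlocks, nil
		}
		numBlocks++
	}
}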
Returning with error [%s]", 120 bytesExpected, remainingBytes, ErrUnexpectedEndOfBlockfile) 121 return nil, nil, ErrUnexpectedEndOfBlockfile 122 } 123 // skip the bytes representing the block size 124 if _, err = s.reader.Discard(n); err != nil { 125 return nil, nil, errors.Wrapf(err, "error discarding [%d] bytes", n) 126 } 127 blockBytes := make([]byte, length) 128 if _, err = io.ReadAtLeast(s.reader, blockBytes, int(length)); err != nil { 129 logger.Errorf("Error reading [%d] bytes from file number [%d], error: %s", length, s.fileNum, err) 130 return nil, nil, errors.Wrapf(err, "error reading [%d] bytes from file number [%d]", length, s.fileNum) 131 } 132 blockPlacementInfo := &blockPlacementInfo{ 133 fileNum: s.fileNum, 134 blockStartOffset: s.currentOffset, 135 blockBytesOffset: s.currentOffset + int64(n)} 136 s.currentOffset += int64(n) + int64(length) 137 logger.Debugf("Returning blockbytes - length=[%d], placementInfo={%s}", len(blockBytes), blockPlacementInfo) 138 return blockBytes, blockPlacementInfo, nil 139 } 140 141 func (s *blockfileStream) close() error { 142 return errors.WithStack(s.file.Close()) 143 } 144 145 /////////////////////////////////// 146 // blockStream functions 147 //////////////////////////////////// 148 func newBlockStream(rootDir string, startFileNum int, startOffset int64, endFileNum int) (*blockStream, error) { 149 startFileStream, err := newBlockfileStream(rootDir, startFileNum, startOffset) 150 if err != nil { 151 return nil, err 152 } 153 return &blockStream{rootDir, startFileNum, endFileNum, startFileStream}, nil 154 } 155 156 func (s *blockStream) moveToNextBlockfileStream() error { 157 var err error 158 if err = s.currentFileStream.close(); err != nil { 159 return err 160 } 161 s.currentFileNum++ 162 if s.currentFileStream, err = newBlockfileStream(s.rootDir, s.currentFileNum, 0); err != nil { 163 return err 164 } 165 return nil 166 } 167 168 func (s *blockStream) nextBlockBytes() ([]byte, error) { 169 blockBytes, _, err := s.nextBlockBytesAndPlacementInfo() 170 return blockBytes, err 171 } 172 173 func (s *blockStream) nextBlockBytesAndPlacementInfo() ([]byte, *blockPlacementInfo, error) { 174 var blockBytes []byte 175 var blockPlacementInfo *blockPlacementInfo 176 var err error 177 if blockBytes, blockPlacementInfo, err = s.currentFileStream.nextBlockBytesAndPlacementInfo(); err != nil { 178 logger.Errorf("Error reading next block bytes from file number [%d]: %s", s.currentFileNum, err) 179 return nil, nil, err 180 } 181 logger.Debugf("blockbytes [%d] read from file [%d]", len(blockBytes), s.currentFileNum) 182 if blockBytes == nil && (s.currentFileNum < s.endFileNum || s.endFileNum < 0) { 183 logger.Debugf("current file [%d] exhausted. Moving to next file", s.currentFileNum) 184 if err = s.moveToNextBlockfileStream(); err != nil { 185 return nil, nil, err 186 } 187 return s.nextBlockBytesAndPlacementInfo() 188 } 189 return blockBytes, blockPlacementInfo, nil 190 } 191 192 func (s *blockStream) close() error { 193 return s.currentFileStream.close() 194 } 195 196 func (i *blockPlacementInfo) String() string { 197 return fmt.Sprintf("fileNum=[%d], startOffset=[%d], bytesOffset=[%d]", 198 i.fileNum, i.blockStartOffset, i.blockBytesOffset) 199 }