// Copyright (c) 2020 IoTeX Foundation
// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability
// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed.
// This source code is governed by Apache License 2.0 that can be found in the LICENSE file.

package filedao

import (
	"github.com/pkg/errors"

	"github.com/iotexproject/iotex-proto/golang/iotextypes"

	"github.com/iotexproject/iotex-core/action"
	"github.com/iotexproject/iotex-core/blockchain/block"
	"github.com/iotexproject/iotex-core/db"
	"github.com/iotexproject/iotex-core/db/batch"
	"github.com/iotexproject/iotex-core/pkg/compress"
	"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)

// populateStagingBuffer rebuilds the in-memory staging buffer from the
// per-block entries persisted under _headerDataNs, keeping only blocks whose
// height is above the tip of the packed block store (i.e. the latest,
// not-yet-packed round). Entries at or below that tip terminate the scan.
func (fd *fileDAOv2) populateStagingBuffer() (*stagingBuffer, error) {
	buffer := newStagingBuffer(fd.header.BlockStoreSize, fd.deser)
	blockStoreTip := fd.highestBlockOfStoreTip()
	for i := uint64(0); i < fd.header.BlockStoreSize; i++ {
		v, err := fd.kvStore.Get(_headerDataNs, byteutil.Uint64ToBytesBigEndian(i))
		if err != nil {
			// missing key/bucket means no more staged entries — not an error
			if errors.Cause(err) == db.ErrNotExist || errors.Cause(err) == db.ErrBucketNotExist {
				break
			}
			return nil, err
		}

		// entries are stored compressed (per header setting); buffer holds raw bytes
		v, err = decompBytes(v, fd.header.Compressor)
		if err != nil {
			return nil, err
		}
		info, err := fd.deser.DeserializeBlockStore(v)
		if err != nil {
			return nil, err
		}

		// populate to staging buffer, if the block is in latest round
		height := info.Block.Height()
		if height > blockStoreTip {
			if _, err = buffer.Put(stagingKey(height, fd.header), v); err != nil {
				return nil, err
			}
		} else {
			break
		}
	}
	return buffer, nil
}

// putTipHashHeightMapping stages the block's hash<->height mappings and the new
// file tip into fd.batch; the batch is committed later by the caller.
func (fd *fileDAOv2) putTipHashHeightMapping(blk *block.Block) error {
	// write height <-> hash mapping
	h := blk.HashBlock()
	if err := addOneEntryToBatch(fd.hashStore, h[:], fd.batch); err != nil {
		return err
	}

	// write hash <-> height mapping
	height := blk.Height()
	fd.batch.Put(_blockHashHeightMappingNS, hashKey(h), byteutil.Uint64ToBytesBigEndian(height), "failed to put hash -> height mapping")

	// update file tip
	ser, err := (&FileTip{Height: height, Hash: h}).Serialize()
	if err != nil {
		return err
	}
	fd.batch.Put(_headerDataNs, _topHeightKey, ser, "failed to put file tip")
	return nil
}

// putBlock serializes blk (with its receipts) and stages it for write. While
// the staging buffer is not full, the single block is written (compressed)
// under _headerDataNs keyed by its staging slot; once the buffer fills, the
// whole buffer is serialized, compressed, and appended to the packed block
// store instead.
func (fd *fileDAOv2) putBlock(blk *block.Block) error {
	blkInfo := &block.Store{
		Block:    blk,
		Receipts: blk.Receipts,
	}
	ser, err := blkInfo.Serialize()
	if err != nil {
		return err
	}
	blkBytes, err := compBytes(ser, fd.header.Compressor)
	if err != nil {
		return err
	}

	// add to staging buffer (uncompressed; compare populateStagingBuffer)
	index := stagingKey(blk.Height(), fd.header)
	full, err := fd.blkBuffer.Put(index, ser)
	if err != nil {
		return err
	}
	if !full {
		fd.batch.Put(_headerDataNs, byteutil.Uint64ToBytesBigEndian(index), blkBytes, "failed to put block")
		return nil
	}

	// pack blocks together, write to block store
	if ser, err = fd.blkBuffer.Serialize(); err != nil {
		return err
	}
	if blkBytes, err = compBytes(ser, fd.header.Compressor); err != nil {
		return err
	}
	return addOneEntryToBatch(fd.blkStore, blkBytes, fd.batch)
}

// putTransactionLog stages the block's transaction log (compressed) into the
// system-log counting index. A nil log is written as an empty BlkTransactionLog
// so the index stays aligned with block heights.
func (fd *fileDAOv2) putTransactionLog(blk *block.Block) error {
	sysLog := blk.TransactionLog()
	if sysLog == nil {
		sysLog = &block.BlkTransactionLog{}
	}
	logBytes, err := compBytes(sysLog.Serialize(), fd.header.Compressor)
	if err != nil {
		return err
	}
	return addOneEntryToBatch(fd.sysStore, logBytes, fd.batch)
}

// addOneEntryToBatch appends value v to counting index c, staging the write
// into batch b and finalizing the index's size update in the same batch.
func addOneEntryToBatch(c db.CountingIndex, v []byte, b batch.KVStoreBatch) error {
	if err := c.UseBatch(b); err != nil {
		return err
	}
	if err := c.Add(v, true); err != nil {
		return err
	}
	return c.Finalize()
}

// compBytes compresses v with the named compressor; an empty name means no
// compression and v is returned as-is.
func compBytes(v []byte, comp string) ([]byte, error) {
	if comp != "" {
		return compress.Compress(v, comp)
	}
	return v, nil
}

// decompBytes is the inverse of compBytes: it decompresses v with the named
// compressor, or returns v unchanged when no compressor is set.
func decompBytes(v []byte, comp string) ([]byte, error) {
	if comp != "" {
		return compress.Decompress(v, comp)
	}
	return v, nil
}

// blockStoreKey is the slot of block in block storage (each item containing blockStorageBatchSize of blocks)
func blockStoreKey(height uint64, header *FileHeader) uint64 {
	if height <= header.Start {
		return 0
	}
	return (height - header.Start) / header.BlockStoreSize
}

// stagingKey is the position of block in the staging buffer
// NOTE(review): assumes height >= header.Start — a smaller height would wrap
// the unsigned subtraction; presumably callers guarantee this. TODO confirm.
func stagingKey(height uint64, header *FileHeader) uint64 {
	return (height - header.Start) % header.BlockStoreSize
}

// lowestBlockOfStoreTip is the lowest height of the tip of block storage
// used in DeleteTipBlock(), once new tip height drops below this, the tip of block storage can be deleted
func (fd *fileDAOv2) lowestBlockOfStoreTip() uint64 {
	if fd.blkStore.Size() == 0 {
		return 0
	}
	return fd.header.Start + (fd.blkStore.Size()-1)*fd.header.BlockStoreSize
}

// highestBlockOfStoreTip is the highest height of the tip of block storage
// NOTE(review): when the store is empty this returns Start-1, which underflows
// for Start == 0 — presumably Start >= 1 by construction; verify in file init.
func (fd *fileDAOv2) highestBlockOfStoreTip() uint64 {
	if fd.blkStore.Size() == 0 {
		return fd.header.Start - 1
	}
	return fd.header.Start + fd.blkStore.Size()*fd.header.BlockStoreSize - 1
}

// getBlock returns the block at the given height, looking in order at the
// staging buffer (for heights not yet packed), the block read cache, and
// finally the packed block store (caching the deserialized block on a miss).
func (fd *fileDAOv2) getBlock(height uint64) (*block.Block, error) {
	if !fd.ContainsHeight(height) {
		return nil, db.ErrNotExist
	}
	// check whether block in staging buffer or not
	storeKey := blockStoreKey(height, fd.header)
	if storeKey >= fd.blkStore.Size() {
		blkStore, err := fd.blkBuffer.Get(stagingKey(height, fd.header))
		if err != nil {
			return nil, err
		}
		return blkStore.Block, nil
	}
	// check whether block in read cache or not
	if value, ok := fd.blkCache.Get(height); ok {
		return value.(*block.Block), nil
	}
	// read from storage DB
	blockStore, err := fd.getBlockStore(height)
	if err != nil {
		return nil, err
	}
	blk, err := fd.deser.BlockFromBlockStoreProto(blockStore)
	if err != nil {
		return nil, err
	}
	// add to read cache
	fd.blkCache.Add(height, blk)
	return blk, nil
}

// getReceipt returns the receipts of the block at the given height, using the
// same three-level lookup as getBlock: staging buffer, receipt read cache,
// then the packed block store (caching the result on a miss).
func (fd *fileDAOv2) getReceipt(height uint64) ([]*action.Receipt, error) {
	if !fd.ContainsHeight(height) {
		return nil, db.ErrNotExist
	}
	// check whether block in staging buffer or not
	storeKey := blockStoreKey(height, fd.header)
	if storeKey >= fd.blkStore.Size() {
		blkStore, err := fd.blkBuffer.Get(stagingKey(height, fd.header))
		if err != nil {
			return nil, err
		}
		return blkStore.Receipts, nil
	}
	// check whether receipts in read cache or not
	if value, ok := fd.receiptCache.Get(height); ok {
		return value.([]*action.Receipt), nil
	}
	// read from storage DB
	blockStore, err := fd.getBlockStore(height)
	if err != nil {
		return nil, err
	}
	receipts, err := fd.deser.ReceiptsFromBlockStoreProto(blockStore)
	if err != nil {
		return nil, err
	}
	// add to read cache
	fd.receiptCache.Add(height, receipts)
	return receipts, nil
}

// getBlockStore fetches the protobuf BlockStore for the given height from the
// packed block store, going through the per-slot proto cache. A packed slot
// that does not contain exactly BlockStoreSize blocks is treated as corrupted.
func (fd *fileDAOv2) getBlockStore(height uint64) (*iotextypes.BlockStore, error) {
	// check whether blockStore in read cache or not
	storeKey := blockStoreKey(height, fd.header)
	if value, ok := fd.blkStorePbCache.Get(storeKey); ok {
		pbInfos := value.(*iotextypes.BlockStores)
		// position within the packed slot reuses the staging-slot formula
		return pbInfos.BlockStores[stagingKey(height, fd.header)], nil
	}
	// read from storage DB
	value, err := fd.blkStore.Get(storeKey)
	if err != nil {
		return nil, err
	}
	value, err = decompBytes(value, fd.header.Compressor)
	if err != nil {
		return nil, err
	}
	pbStores, err := block.DeserializeBlockStoresPb(value)
	if err != nil {
		return nil, err
	}
	if len(pbStores.BlockStores) != int(fd.header.BlockStoreSize) {
		return nil, ErrDataCorruption
	}
	// add to read cache
	fd.blkStorePbCache.Add(storeKey, pbStores)
	return pbStores.BlockStores[stagingKey(height, fd.header)], nil
}