github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/core/ledger/pvtdatastorage/reconcile_missing_pvtdata.go (about) 1 /* 2 Copyright IBM Corp. All Rights Reserved. 3 4 SPDX-License-Identifier: Apache-2.0 5 */ 6 7 package pvtdatastorage 8 9 import ( 10 "github.com/hyperledger/fabric-protos-go/ledger/rwset" 11 "github.com/osdi23p228/fabric/common/ledger/util/leveldbhelper" 12 "github.com/osdi23p228/fabric/core/ledger" 13 "github.com/pkg/errors" 14 "github.com/willf/bitset" 15 ) 16 17 // CommitPvtDataOfOldBlocks commits the pvtData (i.e., previously missing data) of old blockp. 18 // The parameter `blocksPvtData` refers a list of old block's pvtdata which are missing in the pvtstore. 19 // Given a list of old block's pvtData, `CommitPvtDataOfOldBlocks` performs the following three 20 // operations 21 // (1) construct update entries (i.e., dataEntries, expiryEntries, missingDataEntries) 22 // from the above created data entries 23 // (2) create a db update batch from the update entries 24 // (3) commit the update batch to the pvtStore 25 func (s *Store) CommitPvtDataOfOldBlocks( 26 blocksPvtData map[uint64][]*ledger.TxPvtData, 27 unreconciledMissingData ledger.MissingPvtDataInfo, 28 ) error { 29 s.purgerLock.Lock() 30 defer s.purgerLock.Unlock() 31 32 deprioritizedMissingData := unreconciledMissingData 33 34 if s.isLastUpdatedOldBlocksSet { 35 return &ErrIllegalCall{`The lastUpdatedOldBlocksList is set. It means that the 36 stateDB may not be in sync with the pvtStore`} 37 } 38 39 p := &oldBlockDataProcessor{ 40 Store: s, 41 entries: &entriesForPvtDataOfOldBlocks{ 42 dataEntries: make(map[dataKey]*rwset.CollectionPvtReadWriteSet), 43 expiryEntries: make(map[expiryKey]*ExpiryData), 44 prioritizedMissingDataEntries: make(map[nsCollBlk]*bitset.BitSet), 45 deprioritizedMissingDataEntries: make(map[nsCollBlk]*bitset.BitSet), 46 }, 47 } 48 49 if err := p.prepareDataAndExpiryEntries(blocksPvtData); err != nil { 50 return err 51 } 52 53 if err := p.prepareMissingDataEntriesToReflectReconciledData(); err != nil { 54 return err 55 } 56 57 if err := p.prepareMissingDataEntriesToReflectPriority(deprioritizedMissingData); err != nil { 58 return err 59 } 60 61 batch, err := p.constructDBUpdateBatch() 62 if err != nil { 63 return err 64 } 65 return s.db.WriteBatch(batch, true) 66 } 67 68 type oldBlockDataProcessor struct { 69 *Store 70 entries *entriesForPvtDataOfOldBlocks 71 } 72 73 func (p *oldBlockDataProcessor) prepareDataAndExpiryEntries(blocksPvtData map[uint64][]*ledger.TxPvtData) error { 74 var dataEntries []*dataEntry 75 var expData *ExpiryData 76 77 for blkNum, pvtData := range blocksPvtData { 78 dataEntries = append(dataEntries, prepareDataEntries(blkNum, pvtData)...) 79 } 80 81 for _, dataEntry := range dataEntries { 82 nsCollBlk := dataEntry.key.nsCollBlk 83 txNum := dataEntry.key.txNum 84 85 expKey, err := p.constructExpiryKey(dataEntry) 86 if err != nil { 87 return err 88 } 89 90 if neverExpires(expKey.expiringBlk) { 91 p.entries.dataEntries[*dataEntry.key] = dataEntry.value 92 continue 93 } 94 95 if expData, err = p.getExpiryDataFromEntriesOrStore(expKey); err != nil { 96 return err 97 } 98 if expData == nil { 99 // if expiryData is not available, it means that 100 // the pruge scheduler removed these entries and the 101 // associated data entry is no longer needed. Note 102 // that the associated missingData entry would also 103 // be not present. Hence, we can skip this data entry. 104 continue 105 } 106 expData.addPresentData(nsCollBlk.ns, nsCollBlk.coll, txNum) 107 108 p.entries.dataEntries[*dataEntry.key] = dataEntry.value 109 p.entries.expiryEntries[expKey] = expData 110 } 111 return nil 112 } 113 114 func (p *oldBlockDataProcessor) prepareMissingDataEntriesToReflectReconciledData() error { 115 for dataKey := range p.entries.dataEntries { 116 key := dataKey.nsCollBlk 117 txNum := uint(dataKey.txNum) 118 119 prioMissingData, err := p.getPrioMissingDataFromEntriesOrStore(key) 120 if err != nil { 121 return err 122 } 123 if prioMissingData != nil && prioMissingData.Test(txNum) { 124 p.entries.prioritizedMissingDataEntries[key] = prioMissingData.Clear(txNum) 125 continue 126 } 127 128 deprioMissingData, err := p.getDeprioMissingDataFromEntriesOrStore(key) 129 if err != nil { 130 return err 131 } 132 if deprioMissingData != nil && deprioMissingData.Test(txNum) { 133 p.entries.deprioritizedMissingDataEntries[key] = deprioMissingData.Clear(txNum) 134 } 135 } 136 137 return nil 138 } 139 140 func (p *oldBlockDataProcessor) prepareMissingDataEntriesToReflectPriority(deprioritizedList ledger.MissingPvtDataInfo) error { 141 for blkNum, blkMissingData := range deprioritizedList { 142 for txNum, txMissingData := range blkMissingData { 143 for _, nsColl := range txMissingData { 144 key := nsCollBlk{ 145 ns: nsColl.Namespace, 146 coll: nsColl.Collection, 147 blkNum: blkNum, 148 } 149 txNum := uint(txNum) 150 151 prioMissingData, err := p.getPrioMissingDataFromEntriesOrStore(key) 152 if err != nil { 153 return err 154 } 155 if prioMissingData == nil { 156 // we would reach here when either of the following happens: 157 // (1) when the purge scheduler already removed the respective 158 // missing data entry. 159 // (2) when the missing data info is already persistent in the 160 // deprioritized list. Currently, we do not have different 161 // levels of deprioritized list. 162 // In both of the above case, we can continue to the next entry. 163 continue 164 } 165 p.entries.prioritizedMissingDataEntries[key] = prioMissingData.Clear(txNum) 166 167 deprioMissingData, err := p.getDeprioMissingDataFromEntriesOrStore(key) 168 if err != nil { 169 return err 170 } 171 if deprioMissingData == nil { 172 deprioMissingData = &bitset.BitSet{} 173 } 174 p.entries.deprioritizedMissingDataEntries[key] = deprioMissingData.Set(txNum) 175 } 176 } 177 } 178 179 return nil 180 } 181 182 func (p *oldBlockDataProcessor) constructExpiryKey(dataEntry *dataEntry) (expiryKey, error) { 183 // get the expiryBlk number to construct the expiryKey 184 nsCollBlk := dataEntry.key.nsCollBlk 185 expiringBlk, err := p.btlPolicy.GetExpiringBlock(nsCollBlk.ns, nsCollBlk.coll, nsCollBlk.blkNum) 186 if err != nil { 187 return expiryKey{}, errors.WithMessagef(err, "error while constructing expiry data key") 188 } 189 190 return expiryKey{ 191 expiringBlk: expiringBlk, 192 committingBlk: nsCollBlk.blkNum, 193 }, nil 194 } 195 196 func (p *oldBlockDataProcessor) getExpiryDataFromEntriesOrStore(expKey expiryKey) (*ExpiryData, error) { 197 if expiryData, ok := p.entries.expiryEntries[expKey]; ok { 198 return expiryData, nil 199 } 200 201 expData, err := p.db.Get(encodeExpiryKey(&expKey)) 202 if err != nil { 203 return nil, err 204 } 205 if expData == nil { 206 return nil, nil 207 } 208 209 return decodeExpiryValue(expData) 210 } 211 212 func (p *oldBlockDataProcessor) getPrioMissingDataFromEntriesOrStore(nsCollBlk nsCollBlk) (*bitset.BitSet, error) { 213 missingData, ok := p.entries.prioritizedMissingDataEntries[nsCollBlk] 214 if ok { 215 return missingData, nil 216 } 217 218 missingKey := &missingDataKey{ 219 nsCollBlk: nsCollBlk, 220 } 221 key := encodeElgPrioMissingDataKey(missingKey) 222 223 encMissingData, err := p.db.Get(key) 224 if err != nil { 225 return nil, errors.Wrap(err, "error while getting missing data bitmap from the store") 226 } 227 if encMissingData == nil { 228 return nil, nil 229 } 230 231 return decodeMissingDataValue(encMissingData) 232 } 233 234 func (p *oldBlockDataProcessor) getDeprioMissingDataFromEntriesOrStore(nsCollBlk nsCollBlk) (*bitset.BitSet, error) { 235 missingData, ok := p.entries.deprioritizedMissingDataEntries[nsCollBlk] 236 if ok { 237 return missingData, nil 238 } 239 240 missingKey := &missingDataKey{ 241 nsCollBlk: nsCollBlk, 242 } 243 key := encodeElgDeprioMissingDataKey(missingKey) 244 245 encMissingData, err := p.db.Get(key) 246 if err != nil { 247 return nil, errors.Wrap(err, "error while getting missing data bitmap from the store") 248 } 249 if encMissingData == nil { 250 return nil, nil 251 } 252 253 return decodeMissingDataValue(encMissingData) 254 } 255 256 func (p *oldBlockDataProcessor) constructDBUpdateBatch() (*leveldbhelper.UpdateBatch, error) { 257 batch := p.db.NewUpdateBatch() 258 259 if err := p.entries.addDataEntriesTo(batch); err != nil { 260 return nil, errors.WithMessage(err, "error while adding data entries to the update batch") 261 } 262 263 if err := p.entries.addExpiryEntriesTo(batch); err != nil { 264 return nil, errors.WithMessage(err, "error while adding expiry entries to the update batch") 265 } 266 267 if err := p.entries.addElgPrioMissingDataEntriesTo(batch); err != nil { 268 return nil, errors.WithMessage(err, "error while adding eligible prioritized missing data entries to the update batch") 269 } 270 271 if err := p.entries.addElgDeprioMissingDataEntriesTo(batch); err != nil { 272 return nil, errors.WithMessage(err, "error while adding eligible deprioritized missing data entries to the update batch") 273 } 274 275 return batch, nil 276 } 277 278 type entriesForPvtDataOfOldBlocks struct { 279 dataEntries map[dataKey]*rwset.CollectionPvtReadWriteSet 280 expiryEntries map[expiryKey]*ExpiryData 281 prioritizedMissingDataEntries map[nsCollBlk]*bitset.BitSet 282 deprioritizedMissingDataEntries map[nsCollBlk]*bitset.BitSet 283 } 284 285 func (e *entriesForPvtDataOfOldBlocks) addDataEntriesTo(batch *leveldbhelper.UpdateBatch) error { 286 var key, val []byte 287 var err error 288 289 for dataKey, pvtData := range e.dataEntries { 290 key = encodeDataKey(&dataKey) 291 if val, err = encodeDataValue(pvtData); err != nil { 292 return errors.Wrap(err, "error while encoding data value") 293 } 294 batch.Put(key, val) 295 } 296 return nil 297 } 298 299 func (e *entriesForPvtDataOfOldBlocks) addExpiryEntriesTo(batch *leveldbhelper.UpdateBatch) error { 300 var key, val []byte 301 var err error 302 303 for expiryKey, expiryData := range e.expiryEntries { 304 key = encodeExpiryKey(&expiryKey) 305 if val, err = encodeExpiryValue(expiryData); err != nil { 306 return errors.Wrap(err, "error while encoding expiry value") 307 } 308 batch.Put(key, val) 309 } 310 return nil 311 } 312 313 func (e *entriesForPvtDataOfOldBlocks) addElgPrioMissingDataEntriesTo(batch *leveldbhelper.UpdateBatch) error { 314 var key, val []byte 315 var err error 316 317 for nsCollBlk, missingData := range e.prioritizedMissingDataEntries { 318 missingKey := &missingDataKey{ 319 nsCollBlk: nsCollBlk, 320 } 321 key = encodeElgPrioMissingDataKey(missingKey) 322 323 if missingData.None() { 324 batch.Delete(key) 325 continue 326 } 327 328 if val, err = encodeMissingDataValue(missingData); err != nil { 329 return errors.Wrap(err, "error while encoding missing data bitmap") 330 } 331 batch.Put(key, val) 332 } 333 return nil 334 } 335 336 func (e *entriesForPvtDataOfOldBlocks) addElgDeprioMissingDataEntriesTo(batch *leveldbhelper.UpdateBatch) error { 337 var key, val []byte 338 var err error 339 340 for nsCollBlk, missingData := range e.deprioritizedMissingDataEntries { 341 missingKey := &missingDataKey{ 342 nsCollBlk: nsCollBlk, 343 } 344 key = encodeElgDeprioMissingDataKey(missingKey) 345 346 if missingData.None() { 347 batch.Delete(key) 348 continue 349 } 350 351 if val, err = encodeMissingDataValue(missingData); err != nil { 352 return errors.Wrap(err, "error while encoding missing data bitmap") 353 } 354 batch.Put(key, val) 355 } 356 return nil 357 }