/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package pvtdatastorage

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
	"github.com/hyperledger/fabric/core/ledger"
	"github.com/hyperledger/fabric/core/ledger/pvtdatapolicy"
	"github.com/willf/bitset"
)

var logger = flogging.MustGetLogger("pvtdatastorage")

// provider hands out one store (a leveldb DB handle) per ledger id, all
// backed by a single shared leveldb instance.
type provider struct {
	dbProvider *leveldbhelper.Provider
	pvtData    *PrivateDataConfig
}

// store implements the `Store` interface on top of a leveldb handle.
type store struct {
	db              *leveldbhelper.DBHandle
	ledgerid        string
	btlPolicy       pvtdatapolicy.BTLPolicy
	batchesInterval int
	maxBatchSize    int
	purgeInterval   uint64

	isEmpty bool
	// lastCommittedBlock is read/written via sync/atomic so that readers
	// (e.g. GetPvtDataByBlockNum) see a consistent value while Commit
	// advances it concurrently.
	lastCommittedBlock uint64
	purgerLock         sync.Mutex
	collElgProcSync    *collElgProcSync
	// After committing the pvtdata of old blocks,
	// the `isLastUpdatedOldBlocksSet` is set to true.
	// Once the stateDB is updated with these pvtdata,
	// the `isLastUpdatedOldBlocksSet` is set to false.
	// isLastUpdatedOldBlocksSet is mainly used during the
	// recovery process. During the peer startup, if the
	// isLastUpdatedOldBlocksSet is set to true, the pvtdata
	// in the stateDB needs to be updated before finishing the
	// recovery operation.
	isLastUpdatedOldBlocksSet bool
}

type blkTranNumKey []byte

// dataEntry pairs a dataKey with the private write-set stored under it.
type dataEntry struct {
	key   *dataKey
	value *rwset.CollectionPvtReadWriteSet
}

// expiryEntry pairs an expiryKey with the bookkeeping of which data
// expires at that block.
type expiryEntry struct {
	key   *expiryKey
	value *ExpiryData
}

// expiryKey identifies the block (expiringBlk) at which data committed at
// committingBlk expires, per the collection's BTL policy.
type expiryKey struct {
	expiringBlk   uint64
	committingBlk uint64
}

// nsCollBlk identifies a <namespace, collection, block> triple.
type nsCollBlk struct {
	ns, coll string
	blkNum   uint64
}

// dataKey locates one transaction's private data for a collection.
type dataKey struct {
	nsCollBlk
	txNum uint64
}

// missingDataKey locates the missing-data bitmap for a collection at a
// block; isEligible records whether this peer is eligible for the data.
type missingDataKey struct {
	nsCollBlk
	isEligible bool
}

// storeEntries carries all the entries prepared for a single block commit.
type storeEntries struct {
	dataEntries        []*dataEntry
	expiryEntries      []*expiryEntry
	missingDataEntries map[missingDataKey]*bitset.BitSet
}

// lastUpdatedOldBlocksList keeps the list of last updated blocks
// and is stored as the value of lastUpdatedOldBlocksKey (defined in kv_encoding.go)
type lastUpdatedOldBlocksList []uint64

// entriesForPvtDataOfOldBlocks accumulates the updates required to commit
// previously-missing private data of already-committed blocks.
type entriesForPvtDataOfOldBlocks struct {
	// for each <ns, coll, blkNum, txNum>, store the dataEntry, i.e., pvtData
	dataEntries map[dataKey]*rwset.CollectionPvtReadWriteSet
	// store the retrieved (& updated) expiryData in expiryEntries
	expiryEntries map[expiryKey]*ExpiryData
	// for each <ns, coll, blkNum>, store the retrieved (& updated) bitmap in the missingDataEntries
	missingDataEntries map[nsCollBlk]*bitset.BitSet
}

//////// Provider functions /////////////
//////////////////////////////////////////

// NewProvider instantiates a StoreProvider
func NewProvider(conf *PrivateDataConfig) (Provider, error) {
	dbProvider, err := leveldbhelper.NewProvider(&leveldbhelper.Conf{DBPath: conf.StorePath})
	if err != nil {
		return nil, err
	}
	return &provider{
		dbProvider: dbProvider,
		pvtData:    conf,
	}, nil
}

// OpenStore returns a handle to a store
func (p *provider) OpenStore(ledgerid string) (Store, error) {
	dbHandle := p.dbProvider.GetDBHandle(ledgerid)
	s := &store{
		db:              dbHandle,
		ledgerid:        ledgerid,
		batchesInterval: p.pvtData.BatchesInterval,
		maxBatchSize:    p.pvtData.MaxBatchSize,
		purgeInterval:   uint64(p.pvtData.PurgeInterval),
		collElgProcSync: &collElgProcSync{
			// buffered (size 1) so a pending signal is never lost and
			// notify() never blocks
			notification: make(chan bool, 1),
			procComplete: make(chan bool, 1),
		},
	}
	if err := s.initState(); err != nil {
		return nil, err
	}
	s.launchCollElgProc()
	logger.Debugf("Pvtdata store opened. Initial state: isEmpty [%t], lastCommittedBlock [%d]",
		s.isEmpty, s.lastCommittedBlock)
	return s, nil
}

// Close closes the store
func (p *provider) Close() {
	p.dbProvider.Close()
}

//////// store functions ////////////////
//////////////////////////////////////////

// initState restores the in-memory state (isEmpty, lastCommittedBlock,
// isLastUpdatedOldBlocksSet) from the persisted markers, and rolls forward
// a pending commit left behind by a crash of a pre-v2.0 peer.
func (s *store) initState() error {
	var err error
	var blist lastUpdatedOldBlocksList
	if s.isEmpty, s.lastCommittedBlock, err = s.getLastCommittedBlockNum(); err != nil {
		return err
	}

	// TODO: FAB-16298 -- the concept of pendingBatch is no longer valid
	// for pvtdataStore. We can remove it v2.1. We retain the concept in
	// v2.0 to allow rolling upgrade from v142 to v2.0
	batchPending, err := s.hasPendingCommit()
	if err != nil {
		return err
	}

	if batchPending {
		// a pending batch from a previous run is treated as committed:
		// advance the last-committed marker and clear the pending flag
		committingBlockNum := s.nextBlockNum()
		batch := leveldbhelper.NewUpdateBatch()
		batch.Put(lastCommittedBlkkey, encodeLastCommittedBlockVal(committingBlockNum))
		batch.Delete(pendingCommitKey)
		if err := s.db.WriteBatch(batch, true); err != nil {
			return err
		}
		s.isEmpty = false
		s.lastCommittedBlock = committingBlockNum
	}

	if blist, err = s.getLastUpdatedOldBlocksList(); err != nil {
		return err
	}
	if len(blist) > 0 {
		s.isLastUpdatedOldBlocksSet = true
	} // false if not set

	return nil
}

// Init implements the function in the interface `Store`
func (s *store) Init(btlPolicy pvtdatapolicy.BTLPolicy) {
	s.btlPolicy = btlPolicy
}

// Commit implements the function in the interface `Store`. It atomically
// persists the pvtdata, the expiry bookkeeping, and the missing-data
// bitmaps for block `blockNum`, and advances the last-committed marker.
func (s *store) Commit(blockNum uint64, pvtData []*ledger.TxPvtData, missingPvtData ledger.TxMissingPvtDataMap) error {
	expectedBlockNum := s.nextBlockNum()
	if expectedBlockNum != blockNum {
		return &ErrIllegalArgs{fmt.Sprintf("Expected block number=%d, received block number=%d", expectedBlockNum, blockNum)}
	}

	batch := leveldbhelper.NewUpdateBatch()
	var err error
	var keyBytes, valBytes []byte

	storeEntries, err := prepareStoreEntries(blockNum, pvtData, s.btlPolicy, missingPvtData)
	if err != nil {
		return err
	}

	for _, dataEntry := range storeEntries.dataEntries {
		keyBytes = encodeDataKey(dataEntry.key)
		if valBytes, err = encodeDataValue(dataEntry.value); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

	for _, expiryEntry := range storeEntries.expiryEntries {
		keyBytes = encodeExpiryKey(expiryEntry.key)
		if valBytes, err = encodeExpiryValue(expiryEntry.value); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

	for missingDataKey, missingDataValue := range storeEntries.missingDataEntries {
		keyBytes = encodeMissingDataKey(&missingDataKey)
		if valBytes, err = encodeMissingDataValue(missingDataValue); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

	committingBlockNum := s.nextBlockNum()
	logger.Debugf("Committing private data for block [%d]", committingBlockNum)
	// the last-committed marker rides in the same batch, so data and
	// marker are persisted atomically (sync=true)
	batch.Put(lastCommittedBlkkey, encodeLastCommittedBlockVal(committingBlockNum))
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}

	s.isEmpty = false
	// atomic store: concurrent readers load this without a lock
	atomic.StoreUint64(&s.lastCommittedBlock, committingBlockNum)
	logger.Debugf("Committed private data for block [%d]", committingBlockNum)
	s.performPurgeIfScheduled(committingBlockNum)
	return nil
}

// CommitPvtDataOfOldBlocks commits the pvtData (i.e., previously missing data) of old blocks.
// The parameter `blocksPvtData` refers a list of old block's pvtdata which are missing in the pvtstore.
// Given a list of old block's pvtData, `CommitPvtDataOfOldBlocks` performs the following four
// operations
// (1) construct dataEntries for all pvtData
// (2) construct update entries (i.e., dataEntries, expiryEntries, missingDataEntries)
//     from the above created data entries
// (3) create a db update batch from the update entries
// (4) commit the update batch to the pvtStore
func (s *store) CommitPvtDataOfOldBlocks(blocksPvtData map[uint64][]*ledger.TxPvtData) error {
	if s.isLastUpdatedOldBlocksSet {
		return &ErrIllegalCall{`The lastUpdatedOldBlocksList is set. It means that the
		stateDB may not be in sync with the pvtStore`}
	}

	// (1) construct dataEntries for all pvtData
	dataEntries := constructDataEntriesFromBlocksPvtData(blocksPvtData)

	// (2) construct update entries (i.e., dataEntries, expiryEntries, missingDataEntries) from the above created data entries
	logger.Debugf("Constructing pvtdatastore entries for pvtData of [%d] old blocks", len(blocksPvtData))
	updateEntries, err := s.constructUpdateEntriesFromDataEntries(dataEntries)
	if err != nil {
		return err
	}

	// (3) create a db update batch from the update entries
	logger.Debug("Constructing update batch from pvtdatastore entries")
	batch, err := constructUpdateBatchFromUpdateEntries(updateEntries)
	if err != nil {
		return err
	}

	// (4) commit the update batch to the pvtStore
	logger.Debug("Committing the update batch to pvtdatastore")
	if err := s.commitBatch(batch); err != nil {
		return err
	}

	return nil
}

// constructDataEntriesFromBlocksPvtData flattens the per-block pvtdata map
// into a single list of dataEntries.
func constructDataEntriesFromBlocksPvtData(blocksPvtData map[uint64][]*ledger.TxPvtData) []*dataEntry {
	// construct dataEntries for all pvtData
	var dataEntries []*dataEntry
	for blkNum, pvtData := range blocksPvtData {
		// prepare the dataEntries for the pvtData
		dataEntries = append(dataEntries, prepareDataEntries(blkNum, pvtData)...)
	}
	return dataEntries
}

// constructUpdateEntriesFromDataEntries merges each incoming dataEntry with
// the expiry bookkeeping and missing-data bitmap currently recorded for it,
// producing the full set of updates to persist.
func (s *store) constructUpdateEntriesFromDataEntries(dataEntries []*dataEntry) (*entriesForPvtDataOfOldBlocks, error) {
	updateEntries := &entriesForPvtDataOfOldBlocks{
		dataEntries:        make(map[dataKey]*rwset.CollectionPvtReadWriteSet),
		expiryEntries:      make(map[expiryKey]*ExpiryData),
		missingDataEntries: make(map[nsCollBlk]*bitset.BitSet)}

	// for each data entry, first, get the expiryData and missingData from the pvtStore.
	// Second, update the expiryData and missingData as per the data entry. Finally, add
	// the data entry along with the updated expiryData and missingData to the update entries
	for _, dataEntry := range dataEntries {
		// get the expiryBlk number to construct the expiryKey
		expiryKey, err := s.constructExpiryKeyFromDataEntry(dataEntry)
		if err != nil {
			return nil, err
		}

		// get the existing expiryData entry
		var expiryData *ExpiryData
		if !neverExpires(expiryKey.expiringBlk) {
			if expiryData, err = s.getExpiryDataFromUpdateEntriesOrStore(updateEntries, expiryKey); err != nil {
				return nil, err
			}
			if expiryData == nil {
				// data entry is already expired
				// and purged (a rare scenario)
				continue
			}
		}

		// get the existing missingData entry
		var missingData *bitset.BitSet
		nsCollBlk := dataEntry.key.nsCollBlk
		if missingData, err = s.getMissingDataFromUpdateEntriesOrStore(updateEntries, nsCollBlk); err != nil {
			return nil, err
		}
		if missingData == nil {
			// data entry is already expired
			// and purged (a rare scenario)
			continue
		}

		updateEntries.addDataEntry(dataEntry)
		if expiryData != nil { // would be nil for the never expiring entry
			expiryEntry := &expiryEntry{&expiryKey, expiryData}
			updateEntries.updateAndAddExpiryEntry(expiryEntry, dataEntry.key)
		}
		updateEntries.updateAndAddMissingDataEntry(missingData, dataEntry.key)
	}
	return updateEntries, nil
}

// constructExpiryKeyFromDataEntry computes, via the BTL policy, the block at
// which the given data entry expires, and wraps it into an expiryKey.
func (s *store) constructExpiryKeyFromDataEntry(dataEntry *dataEntry) (expiryKey, error) {
	// get the expiryBlk number to construct the expiryKey
	nsCollBlk := dataEntry.key.nsCollBlk
	expiringBlk, err := s.btlPolicy.GetExpiringBlock(nsCollBlk.ns, nsCollBlk.coll, nsCollBlk.blkNum)
	if err != nil {
		return expiryKey{}, err
	}
	return expiryKey{expiringBlk, nsCollBlk.blkNum}, nil
}

// getExpiryDataFromUpdateEntriesOrStore returns the expiryData for the key,
// preferring the (possibly already updated) in-memory copy over the db.
func (s *store) getExpiryDataFromUpdateEntriesOrStore(updateEntries *entriesForPvtDataOfOldBlocks, expiryKey expiryKey) (*ExpiryData, error) {
	expiryData, ok := updateEntries.expiryEntries[expiryKey]
	if !ok {
		var err error
		expiryData, err = s.getExpiryDataOfExpiryKey(&expiryKey)
		if err != nil {
			return nil, err
		}
	}
	return expiryData, nil
}

// getMissingDataFromUpdateEntriesOrStore returns the missing-data bitmap for
// the triple, preferring the in-memory copy over the db (eligible keys only).
func (s *store) getMissingDataFromUpdateEntriesOrStore(updateEntries *entriesForPvtDataOfOldBlocks, nsCollBlk nsCollBlk) (*bitset.BitSet, error) {
	missingData, ok := updateEntries.missingDataEntries[nsCollBlk]
	if !ok {
		var err error
		missingDataKey := &missingDataKey{nsCollBlk, true}
		missingData, err = s.getBitmapOfMissingDataKey(missingDataKey)
		if err != nil {
			return nil, err
		}
	}
	return missingData, nil
}

// addDataEntry records the pvtData of one transaction in the update entries.
func (updateEntries *entriesForPvtDataOfOldBlocks) addDataEntry(dataEntry *dataEntry) {
	dataKey := dataKey{dataEntry.key.nsCollBlk, dataEntry.key.txNum}
	updateEntries.dataEntries[dataKey] = dataEntry.value
}

// updateAndAddExpiryEntry marks the txNum as present in the expiryData and
// records the updated entry.
func (updateEntries *entriesForPvtDataOfOldBlocks) updateAndAddExpiryEntry(expiryEntry *expiryEntry, dataKey *dataKey) {
	txNum := dataKey.txNum
	nsCollBlk := dataKey.nsCollBlk
	// update
	expiryEntry.value.addPresentData(nsCollBlk.ns, nsCollBlk.coll, txNum)
	// we cannot delete entries from MissingDataMap as
	// we keep only one entry per missing <ns-col>
	// irrespective of the number of txNum.

	// add
	expiryKey := expiryKey{expiryEntry.key.expiringBlk, expiryEntry.key.committingBlk}
	updateEntries.expiryEntries[expiryKey] = expiryEntry.value
}

// updateAndAddMissingDataEntry clears the txNum's bit in the missing-data
// bitmap (the data is no longer missing) and records the updated bitmap.
func (updateEntries *entriesForPvtDataOfOldBlocks) updateAndAddMissingDataEntry(missingData *bitset.BitSet, dataKey *dataKey) {

	txNum := dataKey.txNum
	nsCollBlk := dataKey.nsCollBlk
	// update
	missingData.Clear(uint(txNum))
	// add
	updateEntries.missingDataEntries[nsCollBlk] = missingData
}

// constructUpdateBatchFromUpdateEntries serializes the accumulated update
// entries into a single leveldb update batch.
func constructUpdateBatchFromUpdateEntries(updateEntries *entriesForPvtDataOfOldBlocks) (*leveldbhelper.UpdateBatch, error) {
	batch := leveldbhelper.NewUpdateBatch()

	// add the following four types of entries to the update batch: (1) new data entries
	// (i.e., pvtData), (2) updated expiry entries, (3) updated missing data entries, and
	// (4) updated block list

	// (1) add new data entries to the batch
	if err := addNewDataEntriesToUpdateBatch(batch, updateEntries); err != nil {
		return nil, err
	}

	// (2) add updated expiryEntry to the batch
	if err := addUpdatedExpiryEntriesToUpdateBatch(batch, updateEntries); err != nil {
		return nil, err
	}

	// (3) add updated missingData to the batch
	if err := addUpdatedMissingDataEntriesToUpdateBatch(batch, updateEntries); err != nil {
		return nil, err
	}

	return batch, nil
}

// addNewDataEntriesToUpdateBatch encodes and adds every data entry to the batch.
func addNewDataEntriesToUpdateBatch(batch *leveldbhelper.UpdateBatch, entries *entriesForPvtDataOfOldBlocks) error {
	var keyBytes, valBytes []byte
	var err error
	for dataKey, pvtData := range entries.dataEntries {
		keyBytes = encodeDataKey(&dataKey)
		if valBytes, err = encodeDataValue(pvtData); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}
	return nil
}

// addUpdatedExpiryEntriesToUpdateBatch encodes and adds every expiry entry to the batch.
func addUpdatedExpiryEntriesToUpdateBatch(batch *leveldbhelper.UpdateBatch, entries *entriesForPvtDataOfOldBlocks) error {
	var keyBytes, valBytes []byte
	var err error
	for expiryKey, expiryData := range entries.expiryEntries {
		keyBytes = encodeExpiryKey(&expiryKey)
		if valBytes, err = encodeExpiryValue(expiryData); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}
	return nil
}

// addUpdatedMissingDataEntriesToUpdateBatch encodes and adds every updated
// missing-data bitmap to the batch; fully-cleared bitmaps are deleted instead.
func addUpdatedMissingDataEntriesToUpdateBatch(batch *leveldbhelper.UpdateBatch, entries *entriesForPvtDataOfOldBlocks) error {
	var keyBytes, valBytes []byte
	var err error
	for nsCollBlk, missingData := range entries.missingDataEntries {
		keyBytes = encodeMissingDataKey(&missingDataKey{nsCollBlk, true})
		// if the missingData is empty, we need to delete the missingDataKey
		if missingData.None() {
			batch.Delete(keyBytes)
			continue
		}
		if valBytes, err = encodeMissingDataValue(missingData); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}
	return nil
}

// commitBatch writes the given batch to the store with sync enabled.
func (s *store) commitBatch(batch *leveldbhelper.UpdateBatch) error {
	// commit the batch to the store
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}

	return nil
}

// TODO FAB-16293 -- GetLastUpdatedOldBlocksPvtData() can be removed either in v2.0 or in v2.1.
// If we decide to rebuild stateDB in v2.0, by default, the rebuild logic would take
// care of synching stateDB with pvtdataStore without calling GetLastUpdatedOldBlocksPvtData().
// Hence, it can be safely removed. Suppose if we decide not to rebuild stateDB in v2.0,
496 // GetLastUpdatedOldBlocksPvtData implements the function in the interface `Store` 497 func (s *store) GetLastUpdatedOldBlocksPvtData() (map[uint64][]*ledger.TxPvtData, error) { 498 if !s.isLastUpdatedOldBlocksSet { 499 return nil, nil 500 } 501 502 updatedBlksList, err := s.getLastUpdatedOldBlocksList() 503 if err != nil { 504 return nil, err 505 } 506 507 blksPvtData := make(map[uint64][]*ledger.TxPvtData) 508 for _, blkNum := range updatedBlksList { 509 if blksPvtData[blkNum], err = s.GetPvtDataByBlockNum(blkNum, nil); err != nil { 510 return nil, err 511 } 512 } 513 return blksPvtData, nil 514 } 515 516 func (s *store) getLastUpdatedOldBlocksList() ([]uint64, error) { 517 var v []byte 518 var err error 519 if v, err = s.db.Get(lastUpdatedOldBlocksKey); err != nil { 520 return nil, err 521 } 522 if v == nil { 523 return nil, nil 524 } 525 526 var updatedBlksList []uint64 527 buf := proto.NewBuffer(v) 528 numBlks, err := buf.DecodeVarint() 529 if err != nil { 530 return nil, err 531 } 532 for i := 0; i < int(numBlks); i++ { 533 blkNum, err := buf.DecodeVarint() 534 if err != nil { 535 return nil, err 536 } 537 updatedBlksList = append(updatedBlksList, blkNum) 538 } 539 return updatedBlksList, nil 540 } 541 542 // TODO FAB-16294 -- ResetLastUpdatedOldBlocksList() can be removed in v2.1. 543 // From v2.0 onwards, we do not store the last updatedBlksList. Only to support 544 // the rolling upgrade from v142 to v2.0, we retain the ResetLastUpdatedOldBlocksList() 545 // in v2.0. 546 547 // ResetLastUpdatedOldBlocksList implements the function in the interface `Store` 548 func (s *store) ResetLastUpdatedOldBlocksList() error { 549 batch := leveldbhelper.NewUpdateBatch() 550 batch.Delete(lastUpdatedOldBlocksKey) 551 if err := s.db.WriteBatch(batch, true); err != nil { 552 return err 553 } 554 s.isLastUpdatedOldBlocksSet = false 555 return nil 556 } 557 558 // GetPvtDataByBlockNum implements the function in the interface `Store`. 
// If the store is empty or the last committed block number is smaller than the
// requested block number, an 'ErrOutOfRange' is thrown
func (s *store) GetPvtDataByBlockNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	logger.Debugf("Get private data for block [%d], filter=%#v", blockNum, filter)
	if s.isEmpty {
		return nil, &ErrOutOfRange{"The store is empty"}
	}
	// atomic load: Commit may be advancing this value concurrently
	lastCommittedBlock := atomic.LoadUint64(&s.lastCommittedBlock)
	if blockNum > lastCommittedBlock {
		return nil, &ErrOutOfRange{fmt.Sprintf("Last committed block=%d, block requested=%d", lastCommittedBlock, blockNum)}
	}
	startKey, endKey := getDataKeysForRangeScanByBlockNum(blockNum)
	logger.Debugf("Querying private data storage for write sets using startKey=%#v, endKey=%#v", startKey, endKey)
	itr := s.db.GetIterator(startKey, endKey)
	defer itr.Release()

	var blockPvtdata []*ledger.TxPvtData
	var currentTxNum uint64
	var currentTxWsetAssember *txPvtdataAssembler
	firstItr := true

	for itr.Next() {
		dataKeyBytes := itr.Key()
		v11Fmt, err := v11Format(dataKeyBytes)
		if err != nil {
			return nil, err
		}
		if v11Fmt {
			// key was written by a v1.1 peer; delegate the whole block to
			// the legacy retrieval path
			return v11RetrievePvtdata(itr, filter)
		}
		dataValueBytes := itr.Value()
		dataKey, err := decodeDatakey(dataKeyBytes)
		if err != nil {
			return nil, err
		}
		expired, err := isExpired(dataKey.nsCollBlk, s.btlPolicy, lastCommittedBlock)
		if err != nil {
			return nil, err
		}
		if expired || !passesFilter(dataKey, filter) {
			continue
		}
		dataValue, err := decodeDataValue(dataValueBytes)
		if err != nil {
			return nil, err
		}

		if firstItr {
			currentTxNum = dataKey.txNum
			currentTxWsetAssember = newTxPvtdataAssembler(blockNum, currentTxNum)
			firstItr = false
		}

		// keys arrive ordered by txNum; a change of txNum closes out the
		// assembler for the previous transaction
		if dataKey.txNum != currentTxNum {
			blockPvtdata = append(blockPvtdata, currentTxWsetAssember.getTxPvtdata())
			currentTxNum = dataKey.txNum
			currentTxWsetAssember = newTxPvtdataAssembler(blockNum, currentTxNum)
		}
		currentTxWsetAssember.add(dataKey.ns, dataValue)
	}
	// flush the assembler of the final transaction, if any
	if currentTxWsetAssember != nil {
		blockPvtdata = append(blockPvtdata, currentTxWsetAssember.getTxPvtdata())
	}
	return blockPvtdata, nil
}

// TODO: FAB-16297 -- Remove init() as it is no longer needed. The private data feature
// became stable from v1.2 onwards. To allow the initiation of pvtdata store with non-zero
// block height (mainly during a rolling upgrade from an existing v1.1 network to v1.2),
// we introduced pvtdata init() function which would take the height of block store and
// set it as a height of pvtdataStore. From v2.0 onwards, it is no longer needed as we do
// not support a rolling upgrade from v1.1 to v2.0

// InitLastCommittedBlock implements the function in the interface `Store`
func (s *store) InitLastCommittedBlock(blockNum uint64) error {
	if !s.isEmpty {
		return &ErrIllegalCall{"The private data store is not empty. InitLastCommittedBlock() function call is not allowed"}
	}
	batch := leveldbhelper.NewUpdateBatch()
	batch.Put(lastCommittedBlkkey, encodeLastCommittedBlockVal(blockNum))
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}
	s.isEmpty = false
	s.lastCommittedBlock = blockNum
	logger.Debugf("InitLastCommittedBlock set to block [%d]", blockNum)
	return nil
}

// GetMissingPvtDataInfoForMostRecentBlocks implements the function in the interface `Store`
func (s *store) GetMissingPvtDataInfoForMostRecentBlocks(maxBlock int) (ledger.MissingPvtDataInfo, error) {
	// we assume that this function would be called by the gossip only after processing the
	// last retrieved missing pvtdata info and committing the same.
	if maxBlock < 1 {
		return nil, nil
	}

	missingPvtDataInfo := make(ledger.MissingPvtDataInfo)
	numberOfBlockProcessed := 0
	lastProcessedBlock := uint64(0)
	isMaxBlockLimitReached := false
	// as we are not acquiring a read lock, new blocks can get committed while we
	// construct the MissingPvtDataInfo. As a result, lastCommittedBlock can get
	// changed. To ensure consistency, we atomically load the lastCommittedBlock value
	lastCommittedBlock := atomic.LoadUint64(&s.lastCommittedBlock)

	startKey, endKey := createRangeScanKeysForEligibleMissingDataEntries(lastCommittedBlock)
	dbItr := s.db.GetIterator(startKey, endKey)
	defer dbItr.Release()

	for dbItr.Next() {
		missingDataKeyBytes := dbItr.Key()
		missingDataKey := decodeMissingDataKey(missingDataKeyBytes)

		if isMaxBlockLimitReached && (missingDataKey.blkNum != lastProcessedBlock) {
			// ensures that exactly maxBlock number
			// of blocks' entries are processed
			break
		}

		// check whether the entry is expired. If so, move to the next item.
		// As we may use the old lastCommittedBlock value, there is a possibility that
		// this missing data is actually expired but we may get the stale information.
		// Though it may leads to extra work of pulling the expired data, it will not
		// affect the correctness. Further, as we try to fetch the most recent missing
		// data (less possibility of expiring now), such scenario would be rare. In the
		// best case, we can load the latest lastCommittedBlock value here atomically to
		// make this scenario very rare.
		lastCommittedBlock = atomic.LoadUint64(&s.lastCommittedBlock)
		expired, err := isExpired(missingDataKey.nsCollBlk, s.btlPolicy, lastCommittedBlock)
		if err != nil {
			return nil, err
		}
		if expired {
			continue
		}

		// check for an existing entry for the blkNum in the MissingPvtDataInfo.
		// If no such entry exists, create one. Also, keep track of the number of
		// processed block due to maxBlock limit.
		if _, ok := missingPvtDataInfo[missingDataKey.blkNum]; !ok {
			numberOfBlockProcessed++
			if numberOfBlockProcessed == maxBlock {
				isMaxBlockLimitReached = true
				// as there can be more than one entry for this block,
				// we cannot `break` here
				lastProcessedBlock = missingDataKey.blkNum
			}
		}

		valueBytes := dbItr.Value()
		bitmap, err := decodeMissingDataValue(valueBytes)
		if err != nil {
			return nil, err
		}

		// for each transaction which misses private data, make an entry in missingBlockPvtDataInfo
		for index, isSet := bitmap.NextSet(0); isSet; index, isSet = bitmap.NextSet(index + 1) {
			txNum := uint64(index)
			missingPvtDataInfo.Add(missingDataKey.blkNum, txNum, missingDataKey.ns, missingDataKey.coll)
		}
	}

	return missingPvtDataInfo, nil
}

// ProcessCollsEligibilityEnabled implements the function in the interface `Store`.
// It persists a collection-eligibility event for the background processor and
// then signals that processor.
func (s *store) ProcessCollsEligibilityEnabled(committingBlk uint64, nsCollMap map[string][]string) error {
	key := encodeCollElgKey(committingBlk)
	m := newCollElgInfo(nsCollMap)
	val, err := encodeCollElgVal(m)
	if err != nil {
		return err
	}
	batch := leveldbhelper.NewUpdateBatch()
	batch.Put(key, val)
	if err = s.db.WriteBatch(batch, true); err != nil {
		return err
	}
	s.collElgProcSync.notify()
	return nil
}

// performPurgeIfScheduled kicks off an asynchronous purge of expired data
// whenever the committed block number is a multiple of purgeInterval.
func (s *store) performPurgeIfScheduled(latestCommittedBlk uint64) {
	if latestCommittedBlk%s.purgeInterval != 0 {
		return
	}
	go func() {
		// serialized with processCollElgEvents via purgerLock
		s.purgerLock.Lock()
		logger.Debugf("Purger started: Purging expired private data till block number [%d]", latestCommittedBlk)
		defer s.purgerLock.Unlock()
		err := s.purgeExpiredData(0, latestCommittedBlk)
		if err != nil {
			logger.Warningf("Could not purge data from pvtdata store:%s", err)
		}
		logger.Debug("Purger finished")
	}()
}

func (s *store) purgeExpiredData(minBlkNum, maxBlkNum uint64) error { 759 batch := leveldbhelper.NewUpdateBatch() 760 expiryEntries, err := s.retrieveExpiryEntries(minBlkNum, maxBlkNum) 761 if err != nil || len(expiryEntries) == 0 { 762 return err 763 } 764 for _, expiryEntry := range expiryEntries { 765 // this encoding could have been saved if the function retrieveExpiryEntries also returns the encoded expiry keys. 766 // However, keeping it for better readability 767 batch.Delete(encodeExpiryKey(expiryEntry.key)) 768 dataKeys, missingDataKeys := deriveKeys(expiryEntry) 769 for _, dataKey := range dataKeys { 770 batch.Delete(encodeDataKey(dataKey)) 771 } 772 for _, missingDataKey := range missingDataKeys { 773 batch.Delete(encodeMissingDataKey(missingDataKey)) 774 } 775 s.db.WriteBatch(batch, false) 776 } 777 logger.Infof("[%s] - [%d] Entries purged from private data storage till block number [%d]", s.ledgerid, len(expiryEntries), maxBlkNum) 778 return nil 779 } 780 781 func (s *store) retrieveExpiryEntries(minBlkNum, maxBlkNum uint64) ([]*expiryEntry, error) { 782 startKey, endKey := getExpiryKeysForRangeScan(minBlkNum, maxBlkNum) 783 logger.Debugf("retrieveExpiryEntries(): startKey=%#v, endKey=%#v", startKey, endKey) 784 itr := s.db.GetIterator(startKey, endKey) 785 defer itr.Release() 786 787 var expiryEntries []*expiryEntry 788 for itr.Next() { 789 expiryKeyBytes := itr.Key() 790 expiryValueBytes := itr.Value() 791 expiryKey, err := decodeExpiryKey(expiryKeyBytes) 792 if err != nil { 793 return nil, err 794 } 795 expiryValue, err := decodeExpiryValue(expiryValueBytes) 796 if err != nil { 797 return nil, err 798 } 799 expiryEntries = append(expiryEntries, &expiryEntry{key: expiryKey, value: expiryValue}) 800 } 801 return expiryEntries, nil 802 } 803 804 func (s *store) launchCollElgProc() { 805 go func() { 806 s.processCollElgEvents() // process collection eligibility events when store is opened - in case there is an unprocessed events from previous run 807 
for { 808 logger.Debugf("Waiting for collection eligibility event") 809 s.collElgProcSync.waitForNotification() 810 s.processCollElgEvents() 811 s.collElgProcSync.done() 812 } 813 }() 814 } 815 816 func (s *store) processCollElgEvents() { 817 logger.Debugf("Starting to process collection eligibility events") 818 s.purgerLock.Lock() 819 defer s.purgerLock.Unlock() 820 collElgStartKey, collElgEndKey := createRangeScanKeysForCollElg() 821 eventItr := s.db.GetIterator(collElgStartKey, collElgEndKey) 822 defer eventItr.Release() 823 batch := leveldbhelper.NewUpdateBatch() 824 totalEntriesConverted := 0 825 826 for eventItr.Next() { 827 collElgKey, collElgVal := eventItr.Key(), eventItr.Value() 828 blkNum := decodeCollElgKey(collElgKey) 829 CollElgInfo, err := decodeCollElgVal(collElgVal) 830 logger.Debugf("Processing collection eligibility event [blkNum=%d], CollElgInfo=%s", blkNum, CollElgInfo) 831 if err != nil { 832 logger.Errorf("This error is not expected %s", err) 833 continue 834 } 835 for ns, colls := range CollElgInfo.NsCollMap { 836 var coll string 837 for _, coll = range colls.Entries { 838 logger.Infof("Converting missing data entries from ineligible to eligible for [ns=%s, coll=%s]", ns, coll) 839 startKey, endKey := createRangeScanKeysForIneligibleMissingData(blkNum, ns, coll) 840 collItr := s.db.GetIterator(startKey, endKey) 841 collEntriesConverted := 0 842 843 for collItr.Next() { // each entry 844 originalKey, originalVal := collItr.Key(), collItr.Value() 845 modifiedKey := decodeMissingDataKey(originalKey) 846 modifiedKey.isEligible = true 847 batch.Delete(originalKey) 848 copyVal := make([]byte, len(originalVal)) 849 copy(copyVal, originalVal) 850 batch.Put(encodeMissingDataKey(modifiedKey), copyVal) 851 collEntriesConverted++ 852 if batch.Len() > s.maxBatchSize { 853 s.db.WriteBatch(batch, true) 854 batch = leveldbhelper.NewUpdateBatch() 855 sleepTime := time.Duration(s.batchesInterval) 856 logger.Infof("Going to sleep for %d milliseconds between 
batches. Entries for [ns=%s, coll=%s] converted so far = %d", 857 sleepTime, ns, coll, collEntriesConverted) 858 s.purgerLock.Unlock() 859 time.Sleep(sleepTime * time.Millisecond) 860 s.purgerLock.Lock() 861 } 862 } // entry loop 863 864 collItr.Release() 865 logger.Infof("Converted all [%d] entries for [ns=%s, coll=%s]", collEntriesConverted, ns, coll) 866 totalEntriesConverted += collEntriesConverted 867 } // coll loop 868 } // ns loop 869 batch.Delete(collElgKey) // delete the collection eligibility event key as well 870 } // event loop 871 872 s.db.WriteBatch(batch, true) 873 logger.Debugf("Converted [%d] ineligible missing data entries to eligible", totalEntriesConverted) 874 } 875 876 // LastCommittedBlockHeight implements the function in the interface `Store` 877 func (s *store) LastCommittedBlockHeight() (uint64, error) { 878 if s.isEmpty { 879 return 0, nil 880 } 881 return atomic.LoadUint64(&s.lastCommittedBlock) + 1, nil 882 } 883 884 // IsEmpty implements the function in the interface `Store` 885 func (s *store) IsEmpty() (bool, error) { 886 return s.isEmpty, nil 887 } 888 889 // Shutdown implements the function in the interface `Store` 890 func (s *store) Shutdown() { 891 // do nothing 892 } 893 894 func (s *store) nextBlockNum() uint64 { 895 if s.isEmpty { 896 return 0 897 } 898 return atomic.LoadUint64(&s.lastCommittedBlock) + 1 899 } 900 901 // TODO: FAB-16298 -- the concept of pendingBatch is no longer valid 902 // for pvtdataStore. We can remove it v2.1. 
We retain the concept in 903 // v2.0 to allow rolling upgrade from v142 to v2.0 904 func (s *store) hasPendingCommit() (bool, error) { 905 var v []byte 906 var err error 907 if v, err = s.db.Get(pendingCommitKey); err != nil { 908 return false, err 909 } 910 return v != nil, nil 911 } 912 913 func (s *store) getLastCommittedBlockNum() (bool, uint64, error) { 914 var v []byte 915 var err error 916 if v, err = s.db.Get(lastCommittedBlkkey); v == nil || err != nil { 917 return true, 0, err 918 } 919 return false, decodeLastCommittedBlockVal(v), nil 920 } 921 922 type collElgProcSync struct { 923 notification, procComplete chan bool 924 } 925 926 func (sync *collElgProcSync) notify() { 927 select { 928 case sync.notification <- true: 929 logger.Debugf("Signaled to collection eligibility processing routine") 930 default: //noop 931 logger.Debugf("Previous signal still pending. Skipping new signal") 932 } 933 } 934 935 func (sync *collElgProcSync) waitForNotification() { 936 <-sync.notification 937 } 938 939 func (sync *collElgProcSync) done() { 940 select { 941 case sync.procComplete <- true: 942 default: 943 } 944 } 945 946 func (sync *collElgProcSync) waitForDone() { 947 <-sync.procComplete 948 } 949 950 func (s *store) getBitmapOfMissingDataKey(missingDataKey *missingDataKey) (*bitset.BitSet, error) { 951 var v []byte 952 var err error 953 if v, err = s.db.Get(encodeMissingDataKey(missingDataKey)); err != nil { 954 return nil, err 955 } 956 if v == nil { 957 return nil, nil 958 } 959 return decodeMissingDataValue(v) 960 } 961 962 func (s *store) getExpiryDataOfExpiryKey(expiryKey *expiryKey) (*ExpiryData, error) { 963 var v []byte 964 var err error 965 if v, err = s.db.Get(encodeExpiryKey(expiryKey)); err != nil { 966 return nil, err 967 } 968 if v == nil { 969 return nil, nil 970 } 971 return decodeExpiryValue(v) 972 }