github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/storage/badger/commits.go (about)

     1  package badger
     2  
     3  import (
     4  	"github.com/dgraph-io/badger/v2"
     5  
     6  	"github.com/onflow/flow-go/model/flow"
     7  	"github.com/onflow/flow-go/module"
     8  	"github.com/onflow/flow-go/module/metrics"
     9  	"github.com/onflow/flow-go/storage"
    10  	"github.com/onflow/flow-go/storage/badger/operation"
    11  	"github.com/onflow/flow-go/storage/badger/transaction"
    12  )
    13  
// Commits stores state commitments keyed by block ID. Writes go to Badger
// and reads are served through an in-memory read-through/write-through cache.
type Commits struct {
	db    *badger.DB
	cache *Cache[flow.Identifier, flow.StateCommitment]
}
    18  
    19  func NewCommits(collector module.CacheMetrics, db *badger.DB) *Commits {
    20  
    21  	store := func(blockID flow.Identifier, commit flow.StateCommitment) func(*transaction.Tx) error {
    22  		return transaction.WithTx(operation.SkipDuplicates(operation.IndexStateCommitment(blockID, commit)))
    23  	}
    24  
    25  	retrieve := func(blockID flow.Identifier) func(tx *badger.Txn) (flow.StateCommitment, error) {
    26  		return func(tx *badger.Txn) (flow.StateCommitment, error) {
    27  			var commit flow.StateCommitment
    28  			err := operation.LookupStateCommitment(blockID, &commit)(tx)
    29  			return commit, err
    30  		}
    31  	}
    32  
    33  	c := &Commits{
    34  		db: db,
    35  		cache: newCache[flow.Identifier, flow.StateCommitment](collector, metrics.ResourceCommit,
    36  			withLimit[flow.Identifier, flow.StateCommitment](1000),
    37  			withStore(store),
    38  			withRetrieve(retrieve),
    39  		),
    40  	}
    41  
    42  	return c
    43  }
    44  
// storeTx returns a deferred database operation that, when executed within a
// transaction, persists the commit for blockID and populates the cache.
func (c *Commits) storeTx(blockID flow.Identifier, commit flow.StateCommitment) func(*transaction.Tx) error {
	return c.cache.PutTx(blockID, commit)
}
    48  
    49  func (c *Commits) retrieveTx(blockID flow.Identifier) func(tx *badger.Txn) (flow.StateCommitment, error) {
    50  	return func(tx *badger.Txn) (flow.StateCommitment, error) {
    51  		val, err := c.cache.Get(blockID)(tx)
    52  		if err != nil {
    53  			return flow.DummyStateCommitment, err
    54  		}
    55  		return val, nil
    56  	}
    57  }
    58  
// Store persists the state commitment for the given block ID, retrying the
// write transaction if Badger reports a conflict.
func (c *Commits) Store(blockID flow.Identifier, commit flow.StateCommitment) error {
	return operation.RetryOnConflictTx(c.db, transaction.Update, c.storeTx(blockID, commit))
}
    62  
    63  // BatchStore stores Commit keyed by blockID in provided batch
    64  // No errors are expected during normal operation, even if no entries are matched.
    65  // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
    66  func (c *Commits) BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch storage.BatchStorage) error {
    67  	// we can't cache while using batches, as it's unknown at this point when, and if
    68  	// the batch will be committed. Cache will be populated on read however.
    69  	writeBatch := batch.GetWriter()
    70  	return operation.BatchIndexStateCommitment(blockID, commit)(writeBatch)
    71  }
    72  
    73  func (c *Commits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) {
    74  	tx := c.db.NewTransaction(false)
    75  	defer tx.Discard()
    76  	return c.retrieveTx(blockID)(tx)
    77  }
    78  
// RemoveByBlockID removes the state commitment indexed under blockID.
// A missing entry is treated as success (SkipNonExist), so removing a
// non-existent commitment returns no error.
func (c *Commits) RemoveByBlockID(blockID flow.Identifier) error {
	return c.db.Update(operation.SkipNonExist(operation.RemoveStateCommitment(blockID)))
}
    82  
    83  // BatchRemoveByBlockID removes Commit keyed by blockID in provided batch
    84  // No errors are expected during normal operation, even if no entries are matched.
    85  // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
    86  func (c *Commits) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error {
    87  	writeBatch := batch.GetWriter()
    88  	return operation.BatchRemoveStateCommitment(blockID)(writeBatch)
    89  }