github.com/koko1123/flow-go-1@v0.29.6/state/cluster/badger/mutator.go

package badger

import (
	"context"
	"errors"
	"fmt"
	"math"

	"github.com/dgraph-io/badger/v3"

	"github.com/koko1123/flow-go-1/model/cluster"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/module"
	"github.com/koko1123/flow-go-1/module/trace"
	"github.com/koko1123/flow-go-1/state"
	"github.com/koko1123/flow-go-1/state/fork"
	"github.com/koko1123/flow-go-1/storage"
	"github.com/koko1123/flow-go-1/storage/badger/operation"
	"github.com/koko1123/flow-go-1/storage/badger/procedure"
)

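// MutableState wraps the immutable cluster State with the storage dependencies
// needed to extend the cluster chain with new blocks.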
type MutableState struct {
	*State
	tracer   module.Tracer
	headers  storage.Headers
	payloads storage.ClusterPayloads
}

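// NewMutableState creates a new MutableState around the given cluster State.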
func NewMutableState(state *State, tracer module.Tracer, headers storage.Headers, payloads storage.ClusterPayloads) (*MutableState, error) {
	mutableState := &MutableState{
		State:    state,
		tracer:   tracer,
		headers:  headers,
		payloads: payloads,
	}
	return mutableState, nil
}

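// Extend validates that the given cluster block is a valid extension of the
// cluster state and, if it is, stores it in the database. Validation covers the
// chain ID, the height relative to the parent, connectivity to the latest
// finalized cluster block, the payload's reference block and expiry window, and
// duplicate transactions within the payload and its (un-)finalized ancestry.
// Invalid extensions are rejected with the sentinel errors constructed below
// (state.NewInvalidExtensionErrorf, state.NewOutdatedExtensionErrorf,
// state.NewUnverifiableExtensionError).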
// TODO (Ramtin) pass context here
func (m *MutableState) Extend(block *cluster.Block) error {

	blockID := block.ID()

	span, ctx := m.tracer.StartCollectionSpan(context.Background(), blockID, trace.COLClusterStateMutatorExtend)
	defer span.End()

	err := m.State.db.View(func(tx *badger.Txn) error {

		setupSpan, _ := m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendSetup)

		header := block.Header
		payload := block.Payload

		// check chain ID
		if header.ChainID != m.State.clusterID {
			return state.NewInvalidExtensionErrorf("new block chain ID (%s) does not match configured (%s)", block.Header.ChainID, m.State.clusterID)
		}

		// check for a specified reference block
		// we also implicitly check this later, but can fail fast here
		if payload.ReferenceBlockID == flow.ZeroID {
			return state.NewInvalidExtensionError("new block has empty reference block ID")
		}

		// get the chain ID, which determines which cluster state to query
		chainID := header.ChainID

		// get the latest finalized cluster block and latest finalized consensus height
		var finalizedClusterBlock flow.Header
		err := procedure.RetrieveLatestFinalizedClusterHeader(chainID, &finalizedClusterBlock)(tx)
		if err != nil {
			return fmt.Errorf("could not retrieve finalized cluster head: %w", err)
		}
		var finalizedConsensusHeight uint64
		err = operation.RetrieveFinalizedHeight(&finalizedConsensusHeight)(tx)
		if err != nil {
			return fmt.Errorf("could not retrieve finalized height on consensus chain: %w", err)
		}

		// get the header of the parent of the new block
		parent, err := m.headers.ByBlockID(header.ParentID)
		if err != nil {
			return fmt.Errorf("could not retrieve parent header: %w", err)
		}

		// the extending block must increase height by 1 from parent
		if header.Height != parent.Height+1 {
			return state.NewInvalidExtensionErrorf("extending block height (%d) must be parent height + 1 (%d)",
				block.Header.Height, parent.Height)
		}

		// ensure that the extending block connects to the finalized state; we
		// do this by tracing back until we either see a parent block that is
		// the latest finalized block, or reach a height below the finalized boundary

		setupSpan.End()
		checkAncestrySpan, _ := m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendCheckAncestry)

		// start with the extending block's parent
		parentID := header.ParentID
		for parentID != finalizedClusterBlock.ID() {

			// get the parent of the current block
			ancestor, err := m.headers.ByBlockID(parentID)
			if err != nil {
				return fmt.Errorf("could not get parent (%x): %w", parentID, err)
			}

			// if its height is below the current boundary, the block does not connect
			// to the finalized protocol state and would break database consistency
			if ancestor.Height < finalizedClusterBlock.Height {
				return state.NewOutdatedExtensionErrorf("block doesn't connect to finalized state. ancestor.Height (%d), final.Height (%d)",
					ancestor.Height, finalizedClusterBlock.Height)
			}

			parentID = ancestor.ParentID
		}

		checkAncestrySpan.End()
		checkTxsSpan, _ := m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendCheckTransactionsValid)
		defer checkTxsSpan.End()

		// a valid collection must reference a valid reference block
		// NOTE: it is valid for a collection to be expired at this point; rejecting
		// expired collections here would compromise the liveness of the cluster.
		refBlock, err := m.headers.ByBlockID(payload.ReferenceBlockID)
		if errors.Is(err, storage.ErrNotFound) {
			return state.NewUnverifiableExtensionError("cluster block references unknown reference block (id=%x)", payload.ReferenceBlockID)
		}
		if err != nil {
			return fmt.Errorf("could not check reference block: %w", err)
		}

		// no validation of transactions is necessary for empty collections
		if payload.Collection.Len() == 0 {
			return nil
		}

		// check that all transactions within the collection are valid
		// keep track of the min/max reference blocks - the collection must be non-empty
		// at this point so these are guaranteed to be set correctly
		minRefID := flow.ZeroID
		minRefHeight := uint64(math.MaxUint64)
		maxRefHeight := uint64(0)
		for _, flowTx := range payload.Collection.Transactions {
			refBlock, err := m.headers.ByBlockID(flowTx.ReferenceBlockID)
			if errors.Is(err, storage.ErrNotFound) {
				// unknown reference blocks are invalid
				return state.NewUnverifiableExtensionError("collection contains tx (tx_id=%x) with unknown reference block (block_id=%x): %w", flowTx.ID(), flowTx.ReferenceBlockID, err)
			}
			if err != nil {
				return fmt.Errorf("could not check reference block (id=%x): %w", flowTx.ReferenceBlockID, err)
			}

			if refBlock.Height < minRefHeight {
				minRefHeight = refBlock.Height
				minRefID = flowTx.ReferenceBlockID
			}
			if refBlock.Height > maxRefHeight {
				maxRefHeight = refBlock.Height
			}
		}

		// a valid collection must reference the oldest reference block among
		// its constituent transactions
		if minRefID != payload.ReferenceBlockID {
			return state.NewInvalidExtensionErrorf(
				"reference block (id=%x) must match oldest transaction's reference block (id=%x)",
				payload.ReferenceBlockID, minRefID,
			)
		}
		// a valid collection must contain only transactions within its expiry window
		if maxRefHeight-minRefHeight >= flow.DefaultTransactionExpiry {
			return state.NewInvalidExtensionErrorf(
				"collection contains reference height range [%d,%d] exceeding expiry window size: %d",
				minRefHeight, maxRefHeight, flow.DefaultTransactionExpiry)
		}

		// TODO ensure the reference block is part of the main chain
		_ = refBlock

		// check for duplicate transactions: first within the payload itself,
		// then in the block's ancestry
		txLookup := make(map[flow.Identifier]struct{})
		for _, tx := range block.Payload.Collection.Transactions {
			txID := tx.ID()
			if _, exists := txLookup[txID]; exists {
				return state.NewInvalidExtensionErrorf("collection contains transaction (id=%x) more than once", txID)
			}
			txLookup[txID] = struct{}{}
		}

		// first, check for duplicate transactions in the un-finalized ancestry
		duplicateTxIDs, err := m.checkDupeTransactionsInUnfinalizedAncestry(block, txLookup, finalizedClusterBlock.Height)
		if err != nil {
			return fmt.Errorf("could not check for duplicate txs in un-finalized ancestry: %w", err)
		}
		if len(duplicateTxIDs) > 0 {
			return state.NewInvalidExtensionErrorf("payload includes duplicate transactions in un-finalized ancestry (duplicates: %s)", duplicateTxIDs)
		}

		// second, check for duplicate transactions in the finalized ancestry
		duplicateTxIDs, err = m.checkDupeTransactionsInFinalizedAncestry(txLookup, minRefHeight, maxRefHeight)
		if err != nil {
			return fmt.Errorf("could not check for duplicate txs in finalized ancestry: %w", err)
		}
		if len(duplicateTxIDs) > 0 {
			return state.NewInvalidExtensionErrorf("payload includes duplicate transactions in finalized ancestry (duplicates: %s)", duplicateTxIDs)
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("could not validate extending block: %w", err)
	}

	insertDbSpan, _ := m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendDBInsert)
	defer insertDbSpan.End()

	// insert the new block
	err = operation.RetryOnConflict(m.State.db.Update, procedure.InsertClusterBlock(block))
	if err != nil {
		return fmt.Errorf("could not insert cluster block: %w", err)
	}
	return nil
}

// checkDupeTransactionsInUnfinalizedAncestry checks for duplicate transactions in the un-finalized
// ancestry of the given block, and returns a list of all duplicates if there are any.
func (m *MutableState) checkDupeTransactionsInUnfinalizedAncestry(block *cluster.Block, includedTransactions map[flow.Identifier]struct{}, finalHeight uint64) ([]flow.Identifier, error) {

	var duplicateTxIDs []flow.Identifier
	err := fork.TraverseBackward(m.headers, block.Header.ParentID, func(ancestor *flow.Header) error {
		payload, err := m.payloads.ByBlockID(ancestor.ID())
		if err != nil {
			return fmt.Errorf("could not retrieve ancestor payload: %w", err)
		}

		for _, tx := range payload.Collection.Transactions {
			txID := tx.ID()
			_, duplicated := includedTransactions[txID]
			if duplicated {
				duplicateTxIDs = append(duplicateTxIDs, txID)
			}
		}
		return nil
	}, fork.ExcludingHeight(finalHeight))

	return duplicateTxIDs, err
}

// checkDupeTransactionsInFinalizedAncestry checks for duplicate transactions in the finalized
// ancestry, and returns a list of all duplicates if there are any.
func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(includedTransactions map[flow.Identifier]struct{}, minRefHeight, maxRefHeight uint64) ([]flow.Identifier, error) {
	var duplicatedTxIDs []flow.Identifier

	// Let E be the global transaction expiry constant, measured in blocks. For each
	// T ∈ `includedTransactions`, we have to decide whether the transaction
	// already appeared in _any_ finalized cluster block.
	// Notation:
	//   - consider a valid cluster block C and let c be its reference block height
	//   - consider a transaction T ∈ `includedTransactions` and let t denote its
	//     reference block height
	//
	// Boundary conditions:
	// 1. C's reference block height is equal to the lowest reference block height of
	//    all its constituent transactions. Hence, for collection C to potentially contain T, it must satisfy c <= t.
	// 2. For T to be eligible for inclusion in collection C, _none_ of the transactions within C are allowed
	//    to be expired w.r.t. C's reference block. Hence, for collection C to potentially contain T, it must satisfy t < c + E.
	//
	// Therefore, for collection C to potentially contain transaction T, it must satisfy t - E < c <= t.
	// In other words, we only need to inspect collections with reference block height c ∈ (t-E, t].
	// Consequently, for a set of transactions, with `minRefHeight` (`maxRefHeight`) being the smallest (largest)
	// reference block height, we only need to inspect collections with c ∈ (minRefHeight-E, maxRefHeight].
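	//
	// Illustrative example (the numbers are hypothetical, not protocol constants):
	// with E = 600 and a payload whose transactions reference blocks at heights in
	// [minRefHeight, maxRefHeight] = [10000, 10050], only finalized cluster blocks
	// with reference height c ∈ (9400, 10050] could possibly contain one of these
	// transactions, so the lookup below is restricted to exactly that range.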

	// the finalized cluster blocks which could possibly contain any conflicting transactions
	var clusterBlockIDs []flow.Identifier
	start := minRefHeight - flow.DefaultTransactionExpiry + 1
	if start > minRefHeight {
		start = 0 // underflow check: the lower bound would be negative, so start from height 0
	}
	end := maxRefHeight
	err := m.db.View(operation.LookupClusterBlocksByReferenceHeightRange(start, end, &clusterBlockIDs))
	if err != nil {
		return nil, fmt.Errorf("could not lookup finalized cluster blocks by reference height range [%d,%d]: %w", start, end, err)
	}

	for _, blockID := range clusterBlockIDs {
		// TODO: could add LightByBlockID and retrieve only tx IDs
		payload, err := m.payloads.ByBlockID(blockID)
		if err != nil {
			return nil, fmt.Errorf("could not retrieve cluster payload (block_id=%x) to de-duplicate: %w", blockID, err)
		}
		for _, tx := range payload.Collection.Transactions {
			txID := tx.ID()
			_, duplicated := includedTransactions[txID]
			if duplicated {
				duplicatedTxIDs = append(duplicatedTxIDs, txID)
			}
		}
	}

	return duplicatedTxIDs, nil
}