github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/module/builder/collection/builder.go

     1  package collection
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"time"
     8  
     9  	"github.com/dgraph-io/badger/v2"
    10  	"github.com/rs/zerolog"
    11  
    12  	"github.com/onflow/flow-go/model/cluster"
    13  	"github.com/onflow/flow-go/model/flow"
    14  	"github.com/onflow/flow-go/module"
    15  	"github.com/onflow/flow-go/module/irrecoverable"
    16  	"github.com/onflow/flow-go/module/mempool"
    17  	"github.com/onflow/flow-go/module/trace"
    18  	clusterstate "github.com/onflow/flow-go/state/cluster"
    19  	"github.com/onflow/flow-go/state/fork"
    20  	"github.com/onflow/flow-go/state/protocol"
    21  	"github.com/onflow/flow-go/storage"
    22  	"github.com/onflow/flow-go/storage/badger/operation"
    23  	"github.com/onflow/flow-go/storage/badger/procedure"
    24  	"github.com/onflow/flow-go/utils/logging"
    25  )
    26  
    27  // Builder is the builder for collection block payloads. Upon providing a
    28  // payload hash, it also memorizes the payload contents.
    29  //
    30  // NOTE: Builder is NOT safe for use with multiple goroutines. Since the
    31  // HotStuff event loop is the only consumer of this interface and is single
    32  // threaded, this is OK.
    33  type Builder struct {
    34  	db             *badger.DB
    35  	mainHeaders    storage.Headers
    36  	clusterHeaders storage.Headers
    37  	protoState     protocol.State
    38  	clusterState   clusterstate.State
    39  	payloads       storage.ClusterPayloads
    40  	transactions   mempool.Transactions
    41  	tracer         module.Tracer
    42  	config         Config
    43  	log            zerolog.Logger
    44  	clusterEpoch   uint64 // the operating epoch for this cluster
    45  	// cache of values about the operating epoch which never change
    46  	refEpochFirstHeight uint64           // first height of this cluster's operating epoch
    47  	epochFinalHeight    *uint64          // last height of this cluster's operating epoch (nil if epoch not ended)
    48  	epochFinalID        *flow.Identifier // ID of last block in this cluster's operating epoch (nil if epoch not ended)
    49  }
    50  
    51  func NewBuilder(
    52  	db *badger.DB,
    53  	tracer module.Tracer,
    54  	protoState protocol.State,
    55  	clusterState clusterstate.State,
    56  	mainHeaders storage.Headers,
    57  	clusterHeaders storage.Headers,
    58  	payloads storage.ClusterPayloads,
    59  	transactions mempool.Transactions,
    60  	log zerolog.Logger,
    61  	epochCounter uint64,
    62  	opts ...Opt,
    63  ) (*Builder, error) {
    64  	b := Builder{
    65  		db:             db,
    66  		tracer:         tracer,
    67  		protoState:     protoState,
    68  		clusterState:   clusterState,
    69  		mainHeaders:    mainHeaders,
    70  		clusterHeaders: clusterHeaders,
    71  		payloads:       payloads,
    72  		transactions:   transactions,
    73  		config:         DefaultConfig(),
    74  		log:            log.With().Str("component", "cluster_builder").Logger(),
    75  		clusterEpoch:   epochCounter,
    76  	}
    77  
    78  	err := db.View(operation.RetrieveEpochFirstHeight(epochCounter, &b.refEpochFirstHeight))
    79  	if err != nil {
    80  		return nil, fmt.Errorf("could not get epoch first height: %w", err)
    81  	}
    82  
    83  	for _, apply := range opts {
    84  		apply(&b.config)
    85  	}
    86  
    87  	// sanity check config
    88  	if b.config.ExpiryBuffer >= flow.DefaultTransactionExpiry {
    89  		return nil, fmt.Errorf("invalid config: expiry buffer (%d) must be less than transaction expiry (%d)", b.config.ExpiryBuffer, flow.DefaultTransactionExpiry)
    90  	}
    91  
    92  	return &b, nil
    93  }
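
// Illustrative sketch (not part of this package's production wiring): how a caller
// might construct a Builder. All dependencies (db, tracer, states, storages, mempool,
// logger) are assumed to be initialized elsewhere; no functional options are passed,
// so the builder runs with DefaultConfig().
//
//	builder, err := collection.NewBuilder(
//		db, tracer,
//		protoState, clusterState,
//		mainHeaders, clusterHeaders,
//		payloads, txPool,
//		log, epochCounter,
//	)
//	if err != nil {
//		return fmt.Errorf("could not create cluster block builder: %w", err)
//	}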
    94  
    95  // BuildOn creates a new block built on the given parent. It produces a payload
    96  // that is valid with respect to the un-finalized chain it extends.
    97  func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) error, sign func(*flow.Header) error) (*flow.Header, error) {
    98  	parentSpan, ctx := b.tracer.StartSpanFromContext(context.Background(), trace.COLBuildOn)
    99  	defer parentSpan.End()
   100  
   101  	// STEP 1: build a lookup for excluding duplicated transactions.
   102  	// This is briefly how it works:
   103  	//
   104  	// Let E be the global transaction expiry.
   105  	// When incorporating a new collection C, with reference height R, we enforce
   106  	// that it contains only transactions with reference heights in [R,R+E).
   107  	// * if we are building C:
   108  	//   * we don't build expired collections (ie. our local finalized consensus height is at most R+E-1)
   109  	//   * we don't include transactions referencing un-finalized blocks
   110  	//   * therefore, C will contain only transactions with reference heights in [R,R+E)
   111  	// * if we are validating C:
   112  	//   * honest validators only consider C valid if all its transactions have reference heights in [R,R+E)
   113  	//
   114  	// Therefore, to check for duplicates, we only need a lookup of transactions in collections
   115  	// whose expiry windows overlap with the expiry window of the collection under construction.
   116  	//
   117  	// A collection with overlapping expiry window can be finalized or un-finalized.
   118  	// * to find all non-expired and finalized collections, we make use of an index
   119  	//   (main_chain_finalized_height -> cluster_block_ids with respective reference height), to search for a range of main chain heights
   120  	//   which could be only referenced by collections with overlapping expiry windows.
   121  	// * to find all overlapping and un-finalized collections, we can't use the above index, because it's
   122  	//   only for finalized collections. Instead, we simply traverse along the chain up to the last
   123  	//   finalized block. This could possibly include some collections with expiry windows that DON'T
   124  	//   overlap with our collection under construction, but it is unlikely and doesn't impact correctness.
   125  	//
   126  	// After combining both the finalized and un-finalized cluster blocks that overlap with our expiry window,
   127  	// we can iterate through their transactions, and build a lookup for excluding duplicated transactions.
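	//
	// Worked example (illustrative only; the concrete numbers below are assumptions,
	// with E = flow.DefaultTransactionExpiry): suppose E = 600 and we are building a
	// collection C with reference height R = 10_000. Then:
	// * C may only contain transactions with reference heights in [10_000, 10_600).
	// * a finalized collection with reference height 9_500 could still contain a
	//   transaction with reference height 10_050 (since 10_050 < 9_500 + 600), so it
	//   must be included in the duplicate lookup.
	// * a finalized collection with reference height 9_300 can only contain
	//   transactions with reference heights below 9_900, hence cannot conflict with C
	//   and can be skipped.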
   128  	//
   129  	// RATE LIMITING: the builder module can be configured to limit the
   130  	// rate at which transactions with a common payer are included in
   131  	// blocks. Depending on the configured limit, we either allow 1
   132  	// transaction every N sequential collections, or we allow K transactions
   133  	// per collection. The rate limiter tracks transactions included previously
   134  	// to enforce rate limit rules for the constructed block.
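	//
	// For illustration (hypothetical configuration values, not defaults): a
	// MaxPayerTransactionRate of 0.5 would admit a given payer's transactions in at
	// most every 2nd collection, while a rate of 3 would admit up to 3 of that
	// payer's transactions per collection.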
   135  
   136  	span, _ := b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnGetBuildCtx)
   137  	buildCtx, err := b.getBlockBuildContext(parentID)
   138  	span.End()
   139  	if err != nil {
   140  		return nil, fmt.Errorf("could not get block build context: %w", err)
   141  	}
   142  
   143  	log := b.log.With().
   144  		Hex("parent_id", parentID[:]).
   145  		Str("chain_id", buildCtx.parent.ChainID.String()).
   146  		Uint64("final_ref_height", buildCtx.refChainFinalizedHeight).
   147  		Logger()
   148  	log.Debug().Msg("building new cluster block")
   149  
   150  	// STEP 1a: create a lookup of all transactions included in UN-FINALIZED ancestors.
   151  	// In contrast to the transactions collected in step 1b, transactions in un-finalized
   152  	// collections cannot be removed from the mempool, because we may still need to
   153  	// include them in collections built on other forks.
   154  	span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnUnfinalizedLookup)
   155  	err = b.populateUnfinalizedAncestryLookup(buildCtx)
   156  	span.End()
   157  	if err != nil {
   158  		return nil, fmt.Errorf("could not populate un-finalized ancestry lookup (parent_id=%x): %w", parentID, err)
   159  	}
   160  
   161  	// STEP 1b: create a lookup of all transactions previously included in
   162  	// the finalized collections. Any transactions already included in finalized
   163  	// collections can be removed from the mempool.
   164  	span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnFinalizedLookup)
   165  	err = b.populateFinalizedAncestryLookup(buildCtx)
   166  	span.End()
   167  	if err != nil {
   168  		return nil, fmt.Errorf("could not populate finalized ancestry lookup: %w", err)
   169  	}
   170  
   171  	// STEP 2: build a payload of valid transactions, while at the same
   172  	// time figuring out the correct reference block ID for the collection.
   173  	span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnCreatePayload)
   174  	payload, err := b.buildPayload(buildCtx)
   175  	span.End()
   176  	if err != nil {
   177  		return nil, fmt.Errorf("could not build payload: %w", err)
   178  	}
   179  
   180  	// STEP 3: we have a set of transactions that are valid to include on this fork.
   181  	// Now we create the header for the cluster block.
   182  	span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnCreateHeader)
   183  	header, err := b.buildHeader(buildCtx, payload, setter, sign)
   184  	span.End()
   185  	if err != nil {
   186  		return nil, fmt.Errorf("could not build header: %w", err)
   187  	}
   188  
   189  	proposal := cluster.Block{
   190  		Header:  header,
   191  		Payload: payload,
   192  	}
   193  
   194  	// STEP 4: insert the cluster block to the database.
   195  	span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnDBInsert)
   196  	err = operation.RetryOnConflict(b.db.Update, procedure.InsertClusterBlock(&proposal))
   197  	span.End()
   198  	if err != nil {
   199  		return nil, fmt.Errorf("could not insert built block: %w", err)
   200  	}
   201  
   202  	return proposal.Header, nil
   203  }
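
// Illustrative sketch (not the actual HotStuff integration): a consumer supplies the
// parent block ID plus setter and sign callbacks, and receives the header of the newly
// built and persisted cluster block proposal. The `builder`, `view`, and `signer`
// values here are hypothetical placeholders.
//
//	header, err := builder.BuildOn(parentID,
//		func(h *flow.Header) error { h.View = view; return nil }, // hypothetical setter
//		func(h *flow.Header) error { return signer.Sign(h) },     // hypothetical signer
//	)
//	if err != nil {
//		return fmt.Errorf("could not build block on %x: %w", parentID, err)
//	}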
   204  
   205  // getBlockBuildContext retrieves from the database the contextual information
   206  // required to build a new block proposal.
   207  // No errors are expected during normal operation.
   208  func (b *Builder) getBlockBuildContext(parentID flow.Identifier) (*blockBuildContext, error) {
   209  	ctx := new(blockBuildContext)
   210  	ctx.config = b.config
   211  	ctx.parentID = parentID
   212  	ctx.lookup = newTransactionLookup()
   213  
   214  	var err error
   215  	ctx.parent, err = b.clusterHeaders.ByBlockID(parentID)
   216  	if err != nil {
   217  		return nil, fmt.Errorf("could not get parent: %w", err)
   218  	}
   219  	ctx.limiter = newRateLimiter(b.config, ctx.parent.Height+1)
   220  
   221  	// retrieve the finalized boundary ON THE CLUSTER CHAIN
   222  	ctx.clusterChainFinalizedBlock, err = b.clusterState.Final().Head()
   223  	if err != nil {
   224  		return nil, fmt.Errorf("could not retrieve cluster chain finalized header: %w", err)
   225  	}
   226  
   227  	// retrieve the height and ID of the latest finalized block ON THE MAIN CHAIN
   228  	// this is used as the reference point for transaction expiry
   229  	mainChainFinalizedHeader, err := b.protoState.Final().Head()
   230  	if err != nil {
   231  		return nil, fmt.Errorf("could not retrieve main chain finalized header: %w", err)
   232  	}
   233  	ctx.refChainFinalizedHeight = mainChainFinalizedHeader.Height
   234  	ctx.refChainFinalizedID = mainChainFinalizedHeader.ID()
   235  
   236  	// if the epoch has ended and the final block is cached, use the cached values
   237  	if b.epochFinalHeight != nil && b.epochFinalID != nil {
   238  		ctx.refEpochFinalID = b.epochFinalID
   239  		ctx.refEpochFinalHeight = b.epochFinalHeight
   240  		return ctx, nil
   241  	}
   242  
   243  	// otherwise, attempt to read them from storage
   244  	err = b.db.View(func(btx *badger.Txn) error {
   245  		var refEpochFinalHeight uint64
   246  		var refEpochFinalID flow.Identifier
   247  
   248  		err = operation.RetrieveEpochLastHeight(b.clusterEpoch, &refEpochFinalHeight)(btx)
   249  		if err != nil {
   250  			if errors.Is(err, storage.ErrNotFound) {
   251  				return nil
   252  			}
   253  			return fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err)
   254  		}
   255  		err = operation.LookupBlockHeight(refEpochFinalHeight, &refEpochFinalID)(btx)
   256  		if err != nil {
   257  			// if we are able to retrieve the epoch's final height, the block must be finalized
   258  			// therefore, failing to look up its ID here is an unexpected error
   259  			return irrecoverable.NewExceptionf("could not retrieve ID of finalized final block of operating epoch: %w", err)
   260  		}
   261  
   262  		// cache the values
   263  		b.epochFinalHeight = &refEpochFinalHeight
   264  		b.epochFinalID = &refEpochFinalID
   265  		// store the values in the build context
   266  		ctx.refEpochFinalID = b.epochFinalID
   267  		ctx.refEpochFinalHeight = b.epochFinalHeight
   268  
   269  		return nil
   270  	})
   271  	if err != nil {
   272  		return nil, fmt.Errorf("could not get block build context: %w", err)
   273  	}
   274  	return ctx, nil
   275  }
   276  
   277  // populateUnfinalizedAncestryLookup traverses the unfinalized ancestry backward
   278  // to populate the transaction lookup (used for deduplication) and the rate limiter
   279  // (used to limit transaction submission by payer).
   280  //
   281  // The traversal begins with the block specified by parentID (the block we are
   282  // building on top of) and ends with the oldest unfinalized block in the ancestry.
   283  func (b *Builder) populateUnfinalizedAncestryLookup(ctx *blockBuildContext) error {
   284  	err := fork.TraverseBackward(b.clusterHeaders, ctx.parentID, func(ancestor *flow.Header) error {
   285  		payload, err := b.payloads.ByBlockID(ancestor.ID())
   286  		if err != nil {
   287  			return fmt.Errorf("could not retrieve ancestor payload: %w", err)
   288  		}
   289  
   290  		for _, tx := range payload.Collection.Transactions {
   291  			ctx.lookup.addUnfinalizedAncestor(tx.ID())
   292  			ctx.limiter.addAncestor(ancestor.Height, tx)
   293  		}
   294  		return nil
   295  	}, fork.ExcludingHeight(ctx.clusterChainFinalizedBlock.Height))
   296  	return err
   297  }
   298  
   299  // populateFinalizedAncestryLookup traverses the reference block height index to
   300  // populate the transaction lookup (used for deduplication) and the rate limiter
   301  // (used to limit transaction submission by payer).
   302  //
   303  // The traversal is structured so that we check every finalized collection whose
   304  // reference block height is such that it could contain a transaction which is also
   305  // eligible for inclusion in the collection we are building.
   306  func (b *Builder) populateFinalizedAncestryLookup(ctx *blockBuildContext) error {
   307  	minRefHeight := ctx.lowestPossibleReferenceBlockHeight()
   308  	maxRefHeight := ctx.highestPossibleReferenceBlockHeight()
   309  	lookup := ctx.lookup
   310  	limiter := ctx.limiter
   311  
   312  	// Let E be the global transaction expiry constant, measured in blocks. For each
   313  	// T ∈ `includedTransactions`, we have to decide whether the transaction
   314  	// already appeared in _any_ finalized cluster block.
   315  	// Notation:
   316  	//   - consider a valid cluster block C and let c be its reference block height
   317  	//   - consider a transaction T ∈ `includedTransactions` and let t denote its
   318  	//     reference block height
   319  	//
   320  	// Boundary conditions:
   321  	// 1. C's reference block height is equal to the lowest reference block height of
   322  	//    all its constituent transactions. Hence, for collection C to potentially contain T, it must satisfy c <= t.
   323  	// 2. For T to be eligible for inclusion in collection C, _none_ of the transactions within C are allowed
   324  	//    to be expired w.r.t. C's reference block. Hence, for collection C to potentially contain T, it must satisfy t < c + E.
   325  	//
   326  	// Therefore, for collection C to potentially contain transaction T, it must satisfy t - E < c <= t.
   327  	// In other words, we only need to inspect collections with reference block height c ∈ (t-E, t].
   328  	// Consequently, for a set of transactions, with `minRefHeight` (`maxRefHeight`) being the smallest (largest)
   329  	// reference block height, we only need to inspect collections with c ∈ (minRefHeight-E, maxRefHeight].
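	//
	// Worked example (illustrative only, assuming E = 600): if the transactions we may
	// include have reference heights t ∈ [10_000, 10_050], then a finalized collection
	// with reference height c can only conflict if 10_000 - 600 < c <= 10_050, i.e.
	// c ∈ [9_401, 10_050]. This is exactly the inclusive range computed by
	// findRefHeightSearchRangeForConflictingClusterBlocks below.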
   330  
   331  	// the finalized cluster blocks which could possibly contain any conflicting transactions
   332  	var clusterBlockIDs []flow.Identifier
   333  	start, end := findRefHeightSearchRangeForConflictingClusterBlocks(minRefHeight, maxRefHeight)
   334  	err := b.db.View(operation.LookupClusterBlocksByReferenceHeightRange(start, end, &clusterBlockIDs))
   335  	if err != nil {
   336  		return fmt.Errorf("could not lookup finalized cluster blocks by reference height range [%d,%d]: %w", start, end, err)
   337  	}
   338  
   339  	for _, blockID := range clusterBlockIDs {
   340  		header, err := b.clusterHeaders.ByBlockID(blockID)
   341  		if err != nil {
   342  			return fmt.Errorf("could not retrieve cluster header (id=%x): %w", blockID, err)
   343  		}
   344  		payload, err := b.payloads.ByBlockID(blockID)
   345  		if err != nil {
   346  			return fmt.Errorf("could not retrieve cluster payload (block_id=%x): %w", blockID, err)
   347  		}
   348  		for _, tx := range payload.Collection.Transactions {
   349  			lookup.addFinalizedAncestor(tx.ID())
   350  			limiter.addAncestor(header.Height, tx)
   351  		}
   352  	}
   353  
   354  	return nil
   355  }
   356  
   357  // buildPayload constructs a valid payload based on transactions available in the mempool.
   358  // If the mempool is empty, an empty payload will be returned.
   359  // No errors are expected during normal operation.
   360  func (b *Builder) buildPayload(buildCtx *blockBuildContext) (*cluster.Payload, error) {
   361  	lookup := buildCtx.lookup
   362  	limiter := buildCtx.limiter
   363  	maxRefHeight := buildCtx.highestPossibleReferenceBlockHeight()
   364  	// keep track of the actual smallest reference height of all included transactions
   365  	minRefHeight := maxRefHeight
   366  	minRefID := buildCtx.highestPossibleReferenceBlockID()
   367  
   368  	var transactions []*flow.TransactionBody
   369  	var totalByteSize uint64
   370  	var totalGas uint64
   371  	for _, tx := range b.transactions.All() {
   372  
   373  		// if we have reached the maximum number of transactions, stop
   374  		if uint(len(transactions)) >= b.config.MaxCollectionSize {
   375  			break
   376  		}
   377  
   378  		txByteSize := uint64(tx.ByteSize())
   379  		// ignore transactions whose byte size exceeds the max collection byte size;
   380  		// this should never happen, since we limit the byte size of individual transactions,
   381  		// but we keep this check as a safeguard
   382  		if txByteSize > b.config.MaxCollectionByteSize {
   383  			continue
   384  		}
   385  
   386  		// because the max byte size per tx is much smaller than the max collection byte size, we can stop here and not continue.
   387  		// to make this more effective in the future, we could continue and add smaller transactions instead
   388  		if totalByteSize+txByteSize > b.config.MaxCollectionByteSize {
   389  			break
   390  		}
   391  
   392  		// ignore transactions whose gas limit exceeds the max total gas per collection;
   393  		// this should never happen, but we keep this check as a safeguard
   394  		if tx.GasLimit > b.config.MaxCollectionTotalGas {
   395  			continue
   396  		}
   397  
   398  		// because the max gas limit per tx is much smaller than the max total gas per collection, we can stop here and not continue.
   399  		// to make this more effective in the future, we could continue and add transactions with smaller gas limits instead
   400  		if totalGas+tx.GasLimit > b.config.MaxCollectionTotalGas {
   401  			break
   402  		}
   403  
   404  		// retrieve the main chain header that was used as reference
   405  		refHeader, err := b.mainHeaders.ByBlockID(tx.ReferenceBlockID)
   406  		if errors.Is(err, storage.ErrNotFound) {
   407  			continue // in case we are configured with liberal transaction ingest rules
   408  		}
   409  		if err != nil {
   410  			return nil, fmt.Errorf("could not retrieve reference header: %w", err)
   411  		}
   412  
   413  		// disallow un-finalized reference blocks, and reference blocks beyond the cluster's operating epoch
   414  		if refHeader.Height > maxRefHeight {
   415  			continue
   416  		}
   417  
   418  		txID := tx.ID()
   419  		// make sure the reference block is finalized and not orphaned
   420  		blockIDFinalizedAtRefHeight, err := b.mainHeaders.BlockIDByHeight(refHeader.Height)
   421  		if err != nil {
   422  			return nil, fmt.Errorf("could not check that reference block (id=%x) for transaction (id=%x) is finalized: %w", tx.ReferenceBlockID, txID, err)
   423  		}
   424  		if blockIDFinalizedAtRefHeight != tx.ReferenceBlockID {
   425  			// the transaction references an orphaned block - it will never be valid
   426  			b.transactions.Remove(txID)
   427  			continue
   428  		}
   429  
   430  		// ensure the reference block is not too old
   431  		if refHeader.Height < buildCtx.lowestPossibleReferenceBlockHeight() {
   432  			// the transaction is expired; it will never be valid
   433  			b.transactions.Remove(txID)
   434  			continue
   435  		}
   436  
   437  		// check that the transaction was not already used in un-finalized history
   438  		if lookup.isUnfinalizedAncestor(txID) {
   439  			continue
   440  		}
   441  
   442  		// check that the transaction was not already included in finalized history.
   443  		if lookup.isFinalizedAncestor(txID) {
   444  			// remove from mempool; a transaction conflicting with a finalized block will never be valid
   445  			b.transactions.Remove(txID)
   446  			continue
   447  		}
   448  
   449  		// enforce rate limiting rules
   450  		if limiter.shouldRateLimit(tx) {
   451  			if b.config.DryRunRateLimit {
   452  				// log that this transaction would have been rate-limited, but we will still include it in the collection
   453  				b.log.Info().
   454  					Hex("tx_id", logging.ID(txID)).
   455  					Str("payer_addr", tx.Payer.String()).
   456  					Float64("rate_limit", b.config.MaxPayerTransactionRate).
   457  					Msg("dry-run: observed transaction that would have been rate limited")
   458  			} else {
   459  				b.log.Debug().
   460  					Hex("tx_id", logging.ID(txID)).
   461  					Str("payer_addr", tx.Payer.String()).
   462  					Float64("rate_limit", b.config.MaxPayerTransactionRate).
   463  					Msg("transaction is rate-limited")
   464  				continue
   465  			}
   466  		}
   467  
   468  		// ensure we find the lowest reference block height
   469  		if refHeader.Height < minRefHeight {
   470  			minRefHeight = refHeader.Height
   471  			minRefID = tx.ReferenceBlockID
   472  		}
   473  
   474  		// update per-payer transaction count
   475  		limiter.transactionIncluded(tx)
   476  
   477  		transactions = append(transactions, tx)
   478  		totalByteSize += txByteSize
   479  		totalGas += tx.GasLimit
   480  	}
   481  
   482  	// build the payload from the transactions
   483  	payload := cluster.PayloadFromTransactions(minRefID, transactions...)
   484  	return &payload, nil
   485  }
   486  
   487  // buildHeader constructs the header for the cluster block being built.
   488  // It invokes the HotStuff setter to set fields related to HotStuff (QC, etc.).
   489  // No errors are expected during normal operation.
   490  func (b *Builder) buildHeader(
   491  	ctx *blockBuildContext,
   492  	payload *cluster.Payload,
   493  	setter func(header *flow.Header) error,
   494  	sign func(*flow.Header) error,
   495  ) (*flow.Header, error) {
   496  
   497  	header := &flow.Header{
   498  		ChainID:     ctx.parent.ChainID,
   499  		ParentID:    ctx.parentID,
   500  		Height:      ctx.parent.Height + 1,
   501  		PayloadHash: payload.Hash(),
   502  		Timestamp:   time.Now().UTC(),
   503  
   504  		// NOTE: we rely on the HotStuff-provided setter to set the other
   505  		// fields, which are related to signatures and HotStuff internals
   506  	}
   507  
   508  	// set fields specific to the consensus algorithm
   509  	err := setter(header)
   510  	if err != nil {
   511  		return nil, fmt.Errorf("could not set fields on header: %w", err)
   512  	}
   513  	err = sign(header)
   514  	if err != nil {
   515  		return nil, fmt.Errorf("could not sign proposal: %w", err)
   516  	}
   517  	return header, nil
   518  }
   519  
   520  // findRefHeightSearchRangeForConflictingClusterBlocks computes the range of reference
   521  // block heights of ancestor blocks which could possibly contain transactions
   522  // duplicating those in our collection under construction, based on the range of
   523  // reference heights of transactions in the collection under construction.
   524  //
   525  // Input range is the (inclusive) range of reference heights of transactions included
   526  // in the collection under construction. Output range is the (inclusive) range of
   527  // reference heights which need to be searched.
   528  func findRefHeightSearchRangeForConflictingClusterBlocks(minRefHeight, maxRefHeight uint64) (start, end uint64) {
   529  	start = minRefHeight - flow.DefaultTransactionExpiry + 1
   530  	if start > minRefHeight {
   531  		start = 0 // the subtraction underflowed; clamp the start of the search range to 0
   532  	}
   533  	end = maxRefHeight
   534  	return start, end
   535  }
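
// Illustrative sketch (not part of the package API): the search range returned for a
// few hypothetical inputs, assuming flow.DefaultTransactionExpiry = 600:
//
//	findRefHeightSearchRangeForConflictingClusterBlocks(10_000, 10_050) // -> start=9_401, end=10_050
//	findRefHeightSearchRangeForConflictingClusterBlocks(100, 150)       // -> start=0, end=150 (start clamped after underflow)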