github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go

     1  // Copyright 2018 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package kvcoord
    12  
    13  import (
    14  	"context"
    15  
    16  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    17  	"github.com/cockroachdb/cockroach/pkg/settings"
    18  	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
    19  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    20  	"github.com/cockroachdb/cockroach/pkg/util/log"
    21  	"github.com/cockroachdb/cockroach/pkg/util/metric"
    22  	"github.com/cockroachdb/errors"
    23  )
    24  
    25  const (
    26  	// maxTxnRefreshAttempts defines the maximum number of times a single
    27  	// transactional batch can trigger a refresh spans attempt. A batch
    28  	// may need multiple refresh attempts if it runs into progressively
    29  	// larger timestamps as more and more of its component requests are
    30  	// executed.
    31  	maxTxnRefreshAttempts = 5
    32  )
    33  
    34  // MaxTxnRefreshSpansBytes is a threshold in bytes for refresh spans stored
    35  // on the coordinator during the lifetime of a transaction. Refresh spans
    36  // are used for SERIALIZABLE transactions to avoid client restarts.
    37  var MaxTxnRefreshSpansBytes = settings.RegisterPublicIntSetting(
    38  	"kv.transaction.max_refresh_spans_bytes",
    39  	"maximum number of bytes used to track refresh spans in serializable transactions",
    40  	256*1000,
    41  )
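
         // For example, SendLocked below consults this threshold after every batch
         // (a sketch mirroring the real check; see SendLocked for the full logic):
         //
         //	maxBytes := MaxTxnRefreshSpansBytes.Get(&sr.st.SV)
         //	if sr.refreshFootprint.bytes >= maxBytes {
         //		// condense the footprint, or give up on tracking refresh spans
         //	}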
    42  
    43  // txnSpanRefresher is a txnInterceptor that collects the read spans of a
    44  // serializable transaction in the event it gets a serializable retry error. It
    45  // can then use the set of read spans to avoid retrying the transaction if all
    46  // the spans can be updated to the current transaction timestamp.
    47  //
    48  // Serializable isolation mandates that transactions appear to have occurred in
    49  // some total order, where none of their component sub-operations appear to have
    50  // interleaved with sub-operations from other transactions. CockroachDB enforces
    51  // this isolation level by ensuring that all of a transaction's reads and writes
    52  // are performed at the same HLC timestamp. This timestamp is referred to as the
    53  // transaction's commit timestamp.
    54  //
    55  // As a transaction in CockroachDB executes at a certain provisional commit
    56  // timestamp, it lays down intents at this timestamp for any write operations
    57  // and ratchets various timestamp cache entries to this timestamp for any read
    58  // operations. If a transaction performs all of its reads and writes and is able
    59  // to commit at its original provisional commit timestamp then it may go ahead
    60  // and do so. However, for a number of reasons including conflicting reads and
    61  // writes, a transaction may discover that its provisional commit timestamp is
    62  // too low and that it needs to move this timestamp forward to commit.
    63  //
    64  // This poses a problem for operations that the transaction has already
    65  // completed at lower timestamps. Are the effects of these operations still
    66  // valid? The transaction is always free to perform a full restart at a higher
    67  // epoch, but this often requires iterating in a client-side retry loop and
    68  // performing all of the transaction's operations again. Intents are maintained
    69  // across retries to improve the chance that later epochs succeed, but it is
    70  // vastly preferable to avoid re-issuing these operations. Instead, it would be
    71  // ideal if the transaction could "move" each of its operations to its new
    72  // provisional commit timestamp without redoing them entirely.
    73  //
    74  // Only a single write intent can exist on a key and no reads are allowed above
    75  // the intent's timestamp until the intent is resolved, so a transaction is free
     76  // to move any of its intents to a higher timestamp. In fact, a synchronous
    77  // rewrite of these intents isn't even necessary because intent resolution will
     78  // already rewrite the intents at a higher timestamp if necessary. So, moving
    79  // write intents to a higher timestamp can be performed implicitly by committing
    80  // their transaction at a higher timestamp. However, unlike intents created by
    81  // writes, timestamp cache entries created by reads only prevent writes on
    82  // overlapping keys from being written at or below their timestamp; they do
    83  // nothing to prevent writes on overlapping keys from being written above their
    84  // timestamp. This means that a transaction is not free to blindly move its
     85  // reads to a higher timestamp because writes from other transactions may have
    86  // already invalidated them. In effect, this means that transactions acquire
    87  // pessimistic write locks and optimistic read locks.
    88  //
    89  // The txnSpanRefresher is in charge of detecting when a transaction may want to
    90  // move its provisional commit timestamp forward and determining whether doing
    91  // so is safe given the reads that it has performed (i.e. its "optimistic read
    92  // locks"). When the interceptor decides to attempt to move a transaction's
    93  // timestamp forward, it first "refreshes" each of its reads. This refreshing
    94  // step revisits all of the key spans that the transaction has read and checks
     95  // whether any writes that would change the result of these reads have occurred
     96  // between the original time that these spans were read and the timestamp that
     97  // the transaction now wants to commit at. If any read would produce a different
    98  // result at the newer commit timestamp, the refresh fails and the transaction
    99  // is forced to fall back to a full transaction restart. However, if all of the
   100  // reads would produce exactly the same result at the newer commit timestamp,
   101  // the timestamp cache entries for these reads are updated and the transaction
   102  // is free to update its provisional commit timestamp without needing to
   103  // restart.
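         //
         // As a rough sketch of that flow (illustrative only; the real logic lives in
         // SendLocked, sendLockedWithRefreshAttempts, and maybeRetrySend below, and
         // the helper names used here are hypothetical):
         //
         //	br, pErr := send(ba)
         //	if retryableAtRefreshedTimestamp(pErr) {
         //		// Verify that no read span has changed up to the pushed timestamp.
         //		if refreshAllReadSpans(refreshFootprint, pErr.Txn.WriteTimestamp) {
         //			ba.Txn.ReadTimestamp = pErr.Txn.WriteTimestamp
         //			br, pErr = send(ba) // retry at the bumped timestamp
         //		}
         //	}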
   104  type txnSpanRefresher struct {
   105  	st      *cluster.Settings
   106  	knobs   *ClientTestingKnobs
   107  	riGen   rangeIteratorFactory
   108  	wrapped lockedSender
   109  
   110  	// refreshFootprint contains key spans which were read during the
   111  	// transaction. In case the transaction's timestamp needs to be pushed, we can
   112  	// avoid a retriable error by "refreshing" these spans: verifying that there
   113  	// have been no changes to their data in between the timestamp at which they
   114  	// were read and the higher timestamp we want to move to.
   115  	refreshFootprint condensableSpanSet
   116  	// refreshInvalid is set if refresh spans have not been collected (because the
   117  	// memory budget was exceeded). When set, refreshFootprint is empty. This is
   118  	// set when we've failed to condense the refresh spans below the target memory
   119  	// limit.
   120  	refreshInvalid bool
   121  
    122  	// refreshedTimestamp keeps track of the largest timestamp up to which reads
    123  	// are known to be valid (i.e. if we refresh, we refresh from this timestamp).
   124  	// After every epoch bump, it is initialized to the timestamp of the first
   125  	// batch. It is then bumped after every successful refresh.
   126  	refreshedTimestamp hlc.Timestamp
   127  
   128  	// canAutoRetry is set if the txnSpanRefresher is allowed to auto-retry.
   129  	canAutoRetry bool
   130  
   131  	refreshSuccess                *metric.Counter
   132  	refreshFail                   *metric.Counter
   133  	refreshFailWithCondensedSpans *metric.Counter
   134  	refreshMemoryLimitExceeded    *metric.Counter
   135  }
   136  
   137  // SendLocked implements the lockedSender interface.
   138  func (sr *txnSpanRefresher) SendLocked(
   139  	ctx context.Context, ba roachpb.BatchRequest,
   140  ) (*roachpb.BatchResponse, *roachpb.Error) {
   141  	batchReadTimestamp := ba.Txn.ReadTimestamp
   142  	if sr.refreshedTimestamp.IsEmpty() {
   143  		// This must be the first batch we're sending for this epoch. Future
   144  		// refreshes shouldn't check values below batchReadTimestamp, so initialize
   145  		// sr.refreshedTimestamp.
   146  		sr.refreshedTimestamp = batchReadTimestamp
   147  	} else if batchReadTimestamp.Less(sr.refreshedTimestamp) {
   148  		// sr.refreshedTimestamp might be ahead of batchReadTimestamp. We want to
   149  		// read at the latest refreshed timestamp, so bump the batch.
   150  		// batchReadTimestamp can be behind after a successful refresh, if the
   151  		// TxnCoordSender hasn't actually heard about the updated read timestamp.
   152  		// This can happen if a refresh succeeds, but then the retry of the batch
    153  		// that produced the timestamp fails without returning the updated txn (for
   154  		// example, through a canceled ctx). The client should only be sending
   155  		// rollbacks in such cases.
   156  		ba.Txn.ReadTimestamp.Forward(sr.refreshedTimestamp)
   157  		ba.Txn.WriteTimestamp.Forward(sr.refreshedTimestamp)
   158  	} else if sr.refreshedTimestamp != batchReadTimestamp {
   159  		return nil, roachpb.NewError(errors.AssertionFailedf(
   160  			"unexpected batch read timestamp: %s. Expected refreshed timestamp: %s. ba: %s. txn: %s",
   161  			batchReadTimestamp, sr.refreshedTimestamp, ba, ba.Txn))
   162  	}
   163  
   164  	// Set the batch's CanForwardReadTimestamp flag.
   165  	canFwdRTS := sr.canForwardReadTimestampWithoutRefresh(ba.Txn)
   166  	ba.CanForwardReadTimestamp = canFwdRTS
   167  	if rArgs, hasET := ba.GetArg(roachpb.EndTxn); hasET {
   168  		et := rArgs.(*roachpb.EndTxnRequest)
   169  		et.CanCommitAtHigherTimestamp = canFwdRTS
   170  	}
   171  
   172  	maxAttempts := maxTxnRefreshAttempts
   173  	if knob := sr.knobs.MaxTxnRefreshAttempts; knob != 0 {
   174  		if knob == -1 {
   175  			maxAttempts = 0
   176  		} else {
   177  			maxAttempts = knob
   178  		}
   179  	}
   180  
   181  	// Send through wrapped lockedSender. Unlocks while sending then re-locks.
   182  	br, pErr := sr.sendLockedWithRefreshAttempts(ctx, ba, maxAttempts)
   183  	if pErr != nil {
   184  		return nil, pErr
   185  	}
   186  
   187  	// If the transaction is no longer pending, just return without
   188  	// attempting to record its refresh spans.
   189  	if br.Txn.Status != roachpb.PENDING {
   190  		return br, nil
   191  	}
   192  
   193  	// Iterate over and aggregate refresh spans in the requests, qualified by
   194  	// possible resume spans in the responses.
   195  	if !sr.refreshInvalid {
   196  		if err := sr.appendRefreshSpans(ctx, ba, br); err != nil {
   197  			return nil, roachpb.NewError(err)
   198  		}
   199  		// Check whether we should condense the refresh spans.
   200  		maxBytes := MaxTxnRefreshSpansBytes.Get(&sr.st.SV)
   201  		if sr.refreshFootprint.bytes >= maxBytes {
   202  			condensedBefore := sr.refreshFootprint.condensed
   203  			condensedSufficient := sr.tryCondenseRefreshSpans(ctx, maxBytes)
   204  			if condensedSufficient {
   205  				log.VEventf(ctx, 2, "condensed refresh spans for txn %s to %d bytes",
   206  					br.Txn, sr.refreshFootprint.bytes)
   207  			} else {
    208  				// Condensing was not enough. Giving up on tracking reads. Refreshes
   209  				// will not be possible.
   210  				log.VEventf(ctx, 2, "condensed refresh spans didn't save enough memory. txn %s. "+
   211  					"refresh spans after condense: %d bytes",
   212  					br.Txn, sr.refreshFootprint.bytes)
   213  				sr.refreshInvalid = true
   214  				sr.refreshFootprint.clear()
   215  			}
   216  
   217  			if sr.refreshFootprint.condensed && !condensedBefore {
   218  				sr.refreshMemoryLimitExceeded.Inc(1)
   219  			}
   220  		}
   221  	}
   222  	return br, nil
   223  }
   224  
   225  // tryCondenseRefreshSpans attempts to condense the refresh spans in order to
   226  // save memory. Returns true if we managed to condense them below maxBytes.
   227  func (sr *txnSpanRefresher) tryCondenseRefreshSpans(ctx context.Context, maxBytes int64) bool {
   228  	if sr.knobs.CondenseRefreshSpansFilter != nil && !sr.knobs.CondenseRefreshSpansFilter() {
   229  		return false
   230  	}
   231  	sr.refreshFootprint.maybeCondense(ctx, sr.riGen, maxBytes)
   232  	return sr.refreshFootprint.bytes < maxBytes
   233  }
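
         // For illustration, condensing trades precision for memory by merging nearby
         // spans into larger covering spans (keys shown symbolically):
         //
         //	before: {[a,b), [c,d), [e,f)}  -> over the memory budget
         //	after:  {[a,f)}                -> within budget, but covers keys that were
         //	                                  never read
         //
         // A condensed footprint therefore makes a later refresh more likely to fail
         // spuriously, which is why such failures are counted separately via
         // refreshFailWithCondensedSpans.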
   234  
   235  // sendLockedWithRefreshAttempts sends the batch through the wrapped sender. It
   236  // catches serializable errors and attempts to avoid them by refreshing the txn
   237  // at a larger timestamp.
   238  func (sr *txnSpanRefresher) sendLockedWithRefreshAttempts(
   239  	ctx context.Context, ba roachpb.BatchRequest, maxRefreshAttempts int,
   240  ) (*roachpb.BatchResponse, *roachpb.Error) {
   241  	if ba.Txn.WriteTooOld {
   242  		// The WriteTooOld flag is not supposed to be set on requests. It's only set
   243  		// by the server and it's terminated by this interceptor on the client.
   244  		log.Fatalf(ctx, "unexpected WriteTooOld request. ba: %s (txn: %s)",
   245  			ba.String(), ba.Txn.String())
   246  	}
   247  	br, pErr := sr.wrapped.SendLocked(ctx, ba)
   248  
   249  	// 19.2 servers might give us an error with the WriteTooOld flag set. This
   250  	// interceptor wants to always terminate that flag. In the case of an error,
    251  	// we simply clear it from the error's txn.
   252  	if pErr != nil && pErr.GetTxn() != nil {
   253  		pErr.GetTxn().WriteTooOld = false
   254  	}
   255  
   256  	if pErr == nil && br.Txn.WriteTooOld {
   257  		// If we got a response with the WriteTooOld flag set, then we pretend that
   258  		// we got a WriteTooOldError, which will cause us to attempt to refresh and
   259  		// propagate the error if we failed. When it can, the server prefers to
   260  		// return the WriteTooOld flag, rather than a WriteTooOldError because, in
   261  		// the former case, it can leave intents behind. We like refreshing eagerly
   262  		// when the WriteTooOld flag is set because it's likely that the refresh
    263  		// when the WriteTooOld flag is set because it's likely that the refresh will
    264  		// fail (if we previously read the key that's now causing a WTO, then the refresh
    265  		// will surely fail), and it's better to find that out sooner rather than later.
   266  		// read that key before.
   267  		//
   268  		// If the refresh fails, we could continue running the transaction even
   269  		// though it will not be able to commit, in order for it to lay down more
    270  		// intents. Not doing so, though, gives the SQL layer a chance to auto-retry.
   271  		// TODO(andrei): Implement a more discerning policy based on whether
   272  		// auto-retries are still possible.
   273  		//
   274  		// For the refresh, we have two options: either refresh everything read
   275  		// *before* this batch, and then retry this batch, or refresh the current
   276  		// batch's reads too and then, if successful, there'd be nothing to refresh.
    277  		// We take the former option by setting br = nil below to minimize the
   278  		// chances that the refresh fails.
   279  		bumpedTxn := br.Txn.Clone()
   280  		bumpedTxn.WriteTooOld = false
   281  		bumpedTxn.ReadTimestamp = bumpedTxn.WriteTimestamp
   282  		pErr = roachpb.NewErrorWithTxn(
   283  			roachpb.NewTransactionRetryError(roachpb.RETRY_WRITE_TOO_OLD,
   284  				"WriteTooOld flag converted to WriteTooOldError"),
   285  			bumpedTxn)
   286  		br = nil
   287  	}
   288  	if pErr != nil && maxRefreshAttempts > 0 {
   289  		br, pErr = sr.maybeRetrySend(ctx, ba, pErr, maxRefreshAttempts)
   290  	}
   291  	sr.forwardRefreshTimestampOnResponse(br, pErr)
   292  	return br, pErr
   293  }
   294  
   295  // maybeRetrySend attempts to catch serializable errors and avoid them by
   296  // refreshing the txn at a larger timestamp. If it succeeds at refreshing the
   297  // txn timestamp, it recurses into sendLockedWithRefreshAttempts and retries the
   298  // batch. If the refresh fails, the input pErr is returned.
   299  func (sr *txnSpanRefresher) maybeRetrySend(
   300  	ctx context.Context, ba roachpb.BatchRequest, pErr *roachpb.Error, maxRefreshAttempts int,
   301  ) (*roachpb.BatchResponse, *roachpb.Error) {
   302  	// Check for an error which can be retried after updating spans.
   303  	canRetryTxn, retryTxn := roachpb.CanTransactionRetryAtRefreshedTimestamp(ctx, pErr)
   304  	if !canRetryTxn || !sr.canAutoRetry {
   305  		return nil, pErr
   306  	}
   307  
   308  	// If a prefix of the batch was executed, collect refresh spans for
   309  	// that executed portion, and retry the remainder. The canonical
   310  	// case is a batch split between everything up to but not including
   311  	// the EndTxn. Requests up to the EndTxn succeed, but the EndTxn
   312  	// fails with a retryable error. We want to retry only the EndTxn.
   313  	ba.UpdateTxn(retryTxn)
   314  	log.VEventf(ctx, 2, "retrying %s at refreshed timestamp %s because of %s",
   315  		ba, retryTxn.ReadTimestamp, pErr)
   316  
   317  	// Try updating the txn spans so we can retry.
   318  	if ok := sr.tryUpdatingTxnSpans(ctx, retryTxn); !ok {
   319  		sr.refreshFail.Inc(1)
   320  		if sr.refreshFootprint.condensed {
   321  			sr.refreshFailWithCondensedSpans.Inc(1)
   322  		}
   323  		return nil, pErr
   324  	}
   325  	sr.refreshSuccess.Inc(1)
   326  
   327  	// We've refreshed all of the read spans successfully and bumped
   328  	// ba.Txn's timestamps. Attempt the request again.
   329  	retryBr, retryErr := sr.sendLockedWithRefreshAttempts(
   330  		ctx, ba, maxRefreshAttempts-1,
   331  	)
   332  	if retryErr != nil {
   333  		log.VEventf(ctx, 2, "retry failed with %s", retryErr)
   334  		return nil, retryErr
   335  	}
   336  
   337  	log.VEventf(ctx, 2, "retry successful @%s", retryBr.Txn.ReadTimestamp)
   338  	return retryBr, nil
   339  }
   340  
   341  // tryUpdatingTxnSpans sends Refresh and RefreshRange commands to all spans read
    342  // during the transaction to ensure that no writes have occurred more recently
   343  // than sr.refreshedTimestamp. All implicated timestamp caches are updated with
   344  // the final transaction timestamp. Returns whether the refresh was successful
   345  // or not.
   346  func (sr *txnSpanRefresher) tryUpdatingTxnSpans(
   347  	ctx context.Context, refreshTxn *roachpb.Transaction,
   348  ) bool {
   349  
   350  	if sr.refreshInvalid {
   351  		log.VEvent(ctx, 2, "can't refresh txn spans; not valid")
   352  		return false
   353  	} else if sr.refreshFootprint.empty() {
   354  		log.VEvent(ctx, 2, "there are no txn spans to refresh")
   355  		sr.refreshedTimestamp.Forward(refreshTxn.ReadTimestamp)
   356  		return true
   357  	}
   358  
   359  	// Refresh all spans (merge first).
   360  	// TODO(nvanbenschoten): actually merge spans.
   361  	refreshSpanBa := roachpb.BatchRequest{}
   362  	refreshSpanBa.Txn = refreshTxn
   363  	addRefreshes := func(refreshes *condensableSpanSet) {
   364  		// We're going to check writes between the previous refreshed timestamp, if
   365  		// any, and the timestamp we want to bump the transaction to. Note that if
   366  		// we've already refreshed the transaction before, we don't need to check
    367  		// the (key ranges x timestamp range) that we've already checked - there are
    368  		// certainly no values there.
    369  		// More importantly, reads that have happened since we've previously
    370  		// refreshed don't need to be checked below the timestamp at which they were
    371  		// read (which is the timestamp to which we've previously refreshed).
    372  		// Checking below that timestamp (like we would, for example, if we simply
    373  		// used txn.OrigTimestamp here) could cause false positives that would fail
    374  		// the refresh.
   375  		for _, u := range refreshes.asSlice() {
   376  			var req roachpb.Request
   377  			if len(u.EndKey) == 0 {
   378  				req = &roachpb.RefreshRequest{
   379  					RequestHeader: roachpb.RequestHeaderFromSpan(u),
   380  					RefreshFrom:   sr.refreshedTimestamp,
   381  				}
   382  			} else {
   383  				req = &roachpb.RefreshRangeRequest{
   384  					RequestHeader: roachpb.RequestHeaderFromSpan(u),
   385  					RefreshFrom:   sr.refreshedTimestamp,
   386  				}
   387  			}
   388  			refreshSpanBa.Add(req)
   389  			log.VEventf(ctx, 2, "updating span %s @%s - @%s to avoid serializable restart",
   390  				req.Header().Span(), sr.refreshedTimestamp, refreshTxn.WriteTimestamp)
   391  		}
   392  	}
   393  	addRefreshes(&sr.refreshFootprint)
   394  
   395  	// Send through wrapped lockedSender. Unlocks while sending then re-locks.
   396  	if _, batchErr := sr.wrapped.SendLocked(ctx, refreshSpanBa); batchErr != nil {
   397  		log.VEventf(ctx, 2, "failed to refresh txn spans (%s); propagating original retry error", batchErr)
   398  		return false
   399  	}
   400  
   401  	sr.refreshedTimestamp.Forward(refreshTxn.ReadTimestamp)
   402  	return true
   403  }
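
         // For example (illustrative only, with timestamps simplified to integers): a
         // footprint of {c, [a,b)} with refreshedTimestamp=10 and a refreshTxn read
         // timestamp of 20 results in a batch like
         //
         //	Refresh(c, RefreshFrom=10), RefreshRange([a,b), RefreshFrom=10)
         //
         // sent with the refreshed txn, so each request verifies that no writes landed
         // in its span between timestamps 10 and 20.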
   404  
   405  // appendRefreshSpans appends refresh spans from the supplied batch request,
   406  // qualified by the batch response where appropriate.
   407  func (sr *txnSpanRefresher) appendRefreshSpans(
   408  	ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse,
   409  ) error {
   410  	readTimestamp := br.Txn.ReadTimestamp
   411  	if readTimestamp.Less(sr.refreshedTimestamp) {
   412  		// This can happen with (illegal) concurrent txn use, but that's supposed to
   413  		// be detected by the gatekeeper interceptor.
   414  		return errors.AssertionFailedf("attempting to append refresh spans after the tracked"+
   415  			" timestamp has moved forward. batchTimestamp: %s refreshedTimestamp: %s ba: %s",
   416  			errors.Safe(readTimestamp), errors.Safe(sr.refreshedTimestamp), ba)
   417  	}
   418  
   419  	ba.RefreshSpanIterate(br, func(span roachpb.Span) {
   420  		log.VEventf(ctx, 3, "recording span to refresh: %s", span)
   421  		sr.refreshFootprint.insert(span)
   422  	})
   423  	return nil
   424  }
   425  
   426  // canForwardReadTimestampWithoutRefresh returns whether the transaction can
   427  // forward its read timestamp without refreshing any read spans. This allows
   428  // for the "server-side refresh" optimization, where batches are re-evaluated
    429  // at a higher read timestamp without returning to the transaction coordinator.
   430  func (sr *txnSpanRefresher) canForwardReadTimestampWithoutRefresh(txn *roachpb.Transaction) bool {
   431  	return sr.canAutoRetry && !sr.refreshInvalid && sr.refreshFootprint.empty() && !txn.CommitTimestampFixed
   432  }
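
         // For example, if a transaction's very first batch is a single write that
         // gets pushed by the timestamp cache, there are no prior reads to protect,
         // so the server can re-evaluate the batch at the pushed timestamp itself
         // rather than return a retry error to the coordinator.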
   433  
   434  // forwardRefreshTimestampOnResponse updates the refresher's tracked
   435  // refreshedTimestamp to stay in sync with "server-side refreshes", where the
   436  // transaction's read timestamp is updated during the evaluation of a batch.
   437  func (sr *txnSpanRefresher) forwardRefreshTimestampOnResponse(
   438  	br *roachpb.BatchResponse, pErr *roachpb.Error,
   439  ) {
   440  	var txn *roachpb.Transaction
   441  	if pErr != nil {
   442  		txn = pErr.GetTxn()
   443  	} else {
   444  		txn = br.Txn
   445  	}
   446  	if txn != nil {
   447  		sr.refreshedTimestamp.Forward(txn.ReadTimestamp)
   448  	}
   449  }
   450  
   451  // setWrapped implements the txnInterceptor interface.
   452  func (sr *txnSpanRefresher) setWrapped(wrapped lockedSender) { sr.wrapped = wrapped }
   453  
   454  // populateLeafInputState is part of the txnInterceptor interface.
   455  func (sr *txnSpanRefresher) populateLeafInputState(tis *roachpb.LeafTxnInputState) {
   456  	tis.RefreshInvalid = sr.refreshInvalid
   457  }
   458  
   459  // populateLeafFinalState is part of the txnInterceptor interface.
   460  func (sr *txnSpanRefresher) populateLeafFinalState(tfs *roachpb.LeafTxnFinalState) {
   461  	tfs.RefreshInvalid = sr.refreshInvalid
   462  	if !sr.refreshInvalid {
   463  		// Copy mutable state so access is safe for the caller.
   464  		tfs.RefreshSpans = append([]roachpb.Span(nil), sr.refreshFootprint.asSlice()...)
   465  	}
   466  }
   467  
   468  // importLeafFinalState is part of the txnInterceptor interface.
   469  func (sr *txnSpanRefresher) importLeafFinalState(
   470  	ctx context.Context, tfs *roachpb.LeafTxnFinalState,
   471  ) {
   472  	if tfs.RefreshInvalid {
   473  		sr.refreshInvalid = true
   474  		sr.refreshFootprint.clear()
   475  	} else if !sr.refreshInvalid {
   476  		sr.refreshFootprint.insert(tfs.RefreshSpans...)
   477  		sr.refreshFootprint.maybeCondense(ctx, sr.riGen, MaxTxnRefreshSpansBytes.Get(&sr.st.SV))
   478  	}
   479  }
   480  
   481  // epochBumpedLocked implements the txnInterceptor interface.
   482  func (sr *txnSpanRefresher) epochBumpedLocked() {
   483  	sr.refreshFootprint.clear()
   484  	sr.refreshInvalid = false
   485  	sr.refreshedTimestamp.Reset()
   486  }
   487  
   488  // createSavepointLocked is part of the txnReqInterceptor interface.
   489  func (sr *txnSpanRefresher) createSavepointLocked(ctx context.Context, s *savepoint) {
   490  	s.refreshSpans = make([]roachpb.Span, len(sr.refreshFootprint.asSlice()))
   491  	copy(s.refreshSpans, sr.refreshFootprint.asSlice())
   492  	s.refreshInvalid = sr.refreshInvalid
   493  }
   494  
   495  // rollbackToSavepointLocked is part of the txnReqInterceptor interface.
   496  func (sr *txnSpanRefresher) rollbackToSavepointLocked(ctx context.Context, s savepoint) {
   497  	sr.refreshFootprint.clear()
   498  	sr.refreshFootprint.insert(s.refreshSpans...)
   499  	sr.refreshInvalid = s.refreshInvalid
   500  }
   501  
   502  // closeLocked implements the txnInterceptor interface.
   503  func (*txnSpanRefresher) closeLocked() {}