github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/replica_follower_read.go

// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package kvserver

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/ctpb"
	ctstorage "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/storage"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/settings"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// FollowerReadsEnabled controls whether replicas attempt to serve follower
// reads. The closed timestamp machinery is unaffected by this, i.e. the same
// information is collected and passed around, regardless of the value of this
// setting.
var FollowerReadsEnabled = settings.RegisterPublicBoolSetting(
	"kv.closed_timestamp.follower_reads_enabled",
	"allow (all) replicas to serve consistent historical reads based on closed timestamp information",
	true,
)
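
// As a usage note (illustrative; not part of this file's logic): the setting
// above is a public cluster setting, so it can be toggled from SQL, e.g.
//
//	SET CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled = false;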

// canServeFollowerRead tests, when a range lease could not be acquired, whether
// the batch can be served as a follower read despite the error. Only
// non-locking, read-only requests can be served as follower reads. The batch
// must be composed exclusively of this kind of request to be accepted as a
// follower read.
func (r *Replica) canServeFollowerRead(
	ctx context.Context, ba *roachpb.BatchRequest, pErr *roachpb.Error,
) *roachpb.Error {
	canServeFollowerRead := false
	if lErr, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); ok &&
		lErr.LeaseHolder != nil && lErr.Lease.Type() == roachpb.LeaseEpoch &&
		(!ba.IsLocking() && ba.IsAllTransactional()) && // followerreadsccl.batchCanBeEvaluatedOnFollower
		(ba.Txn == nil || !ba.Txn.IsLocking()) && // followerreadsccl.txnCanPerformFollowerRead
		FollowerReadsEnabled.Get(&r.store.cfg.Settings.SV) {

		// There's no known reason that a non-VOTER_FULL replica couldn't serve follower
		// reads (or RangeFeed), but as of the time of writing, these are expected
		// to be short-lived, so it's not worth working out the edge cases. Revisit if
		// we add long-lived learners or feel that incoming/outgoing voters also need
		// to be able to serve follower reads.
		repDesc, err := r.GetReplicaDescriptor()
		if err != nil {
			return roachpb.NewError(err)
		}
		if typ := repDesc.GetType(); typ != roachpb.VOTER_FULL {
			log.Eventf(ctx, "%s replicas cannot serve follower reads", typ)
			return pErr
		}

		ts := ba.Timestamp
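		// ba.Txn.MaxTimestamp is the upper bound of the transaction's uncertainty
		// interval; forwarding the read timestamp to it ensures that every
		// timestamp the read might observe must be closed.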
		if ba.Txn != nil {
			ts.Forward(ba.Txn.MaxTimestamp)
		}

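		// Serve the read only if every timestamp it might observe is closed. Note
		// that maxClosed's ok return value (false for expiration-based leases,
		// which yield a zero timestamp) can be ignored here: a zero maxClosed
		// fails the LessEq check below anyway.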
		maxClosed, _ := r.maxClosed(ctx)
		canServeFollowerRead = ts.LessEq(maxClosed)
		if !canServeFollowerRead {
			// We can't actually serve the read based on the closed timestamp.
			// Signal the clients that we want an update so that future requests can succeed.
			r.store.cfg.ClosedTimestamp.Clients.Request(lErr.LeaseHolder.NodeID, r.RangeID)

			if false {
				// NB: this can't go behind V(x) because the log message created by the
				// storage might be gigantic in real clusters, and we don't want to trip it
				// using logspy.
				log.Warningf(ctx, "can't serve follower read for %s at epoch %d, storage is %s",
					ba.Timestamp, lErr.Lease.Epoch,
					r.store.cfg.ClosedTimestamp.Storage.(*ctstorage.MultiStorage).StringForNodes(lErr.LeaseHolder.NodeID),
				)
			}
		}
	}

	if !canServeFollowerRead {
		// We couldn't do anything with the error; propagate it.
		return pErr
	}

	// This replica can serve this read!
	//
	// TODO(tschottdorf): once a read for a timestamp T has been served, the replica may
	// serve reads for that and smaller timestamps forever.
	log.Event(ctx, "serving via follower read")
	r.store.metrics.FollowerReadsCount.Inc(1)
	return nil
}
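
// Illustrative sketch only (the names below are placeholders; the actual
// dispatch lives elsewhere in kvserver): a caller that failed to acquire the
// lease would consult canServeFollowerRead before propagating the error:
//
//	if pErr := r.canServeFollowerRead(ctx, ba, leaseErr); pErr != nil {
//		return nil, pErr // not serviceable as a follower read
//	}
//	// Otherwise, evaluate ba locally as a historical read.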

// maxClosed returns the maximum closed timestamp for this range.
// It is computed as the most recent of the closed timestamp known for the
// current lease holder of this range (as tracked by the closed timestamp
// subsystem) and the start time of the current lease. It is safe to use the
// start time of the current lease because leasePostApply bumps the timestamp
// cache forward to at least the new lease start time. Using this combination
// allows the closed timestamp mechanism to be robust to lease transfers.
// If the ok return value is false, the Replica is a member of a range which
// uses an expiration-based lease. Expiration-based leases do not support the
// closed timestamp subsystem. A zero-value timestamp will be returned if ok
// is false.
func (r *Replica) maxClosed(ctx context.Context) (_ hlc.Timestamp, ok bool) {
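	// Snapshot the lease, lease applied index, and initial max closed timestamp
	// under the read lock so that the three values are mutually consistent.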
	r.mu.RLock()
	lai := r.mu.state.LeaseAppliedIndex
	lease := *r.mu.state.Lease
	initialMaxClosed := r.mu.initialMaxClosed
	r.mu.RUnlock()
	if lease.Expiration != nil {
		return hlc.Timestamp{}, false
	}
	maxClosed := r.store.cfg.ClosedTimestamp.Provider.MaxClosed(
		lease.Replica.NodeID, r.RangeID, ctpb.Epoch(lease.Epoch), ctpb.LAI(lai))
	maxClosed.Forward(lease.Start)
	maxClosed.Forward(initialMaxClosed)
	return maxClosed, true
}
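
// Worked example (hypothetical numbers): if the provider reports a closed
// timestamp of 100, the current lease started at 105, and initialMaxClosed is
// 90, maxClosed returns 105: each Forward call advances the receiver only if
// the argument is later.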