github.com/KinWaiYuen/client-go/v2@v2.5.4/txnkv/txnsnapshot/snapshot.go

     1  // Copyright 2021 TiKV Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // NOTE: The code in this file is based on code from the
    16  // TiDB project, licensed under the Apache License v 2.0
    17  //
    18  // https://github.com/pingcap/tidb/tree/cc5e161ac06827589c4966674597c137cc9e809c/store/tikv/snapshot.go
    19  //
    20  
    21  // Copyright 2015 PingCAP, Inc.
    22  //
    23  // Licensed under the Apache License, Version 2.0 (the "License");
    24  // you may not use this file except in compliance with the License.
    25  // You may obtain a copy of the License at
    26  //
    27  //     http://www.apache.org/licenses/LICENSE-2.0
    28  //
    29  // Unless required by applicable law or agreed to in writing, software
    30  // distributed under the License is distributed on an "AS IS" BASIS,
    31  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    32  // See the License for the specific language governing permissions and
    33  // limitations under the License.
    34  
    35  package txnsnapshot
    36  
    37  import (
    38  	"bytes"
    39  	"context"
    40  	"fmt"
    41  	"math"
    42  	"sync"
    43  	"sync/atomic"
    44  	"time"
    45  
    46  	tikverr "github.com/KinWaiYuen/client-go/v2/error"
    47  	"github.com/KinWaiYuen/client-go/v2/internal/client"
    48  	"github.com/KinWaiYuen/client-go/v2/internal/locate"
    49  	"github.com/KinWaiYuen/client-go/v2/internal/logutil"
    50  	"github.com/KinWaiYuen/client-go/v2/internal/retry"
    51  	"github.com/KinWaiYuen/client-go/v2/internal/unionstore"
    52  	"github.com/KinWaiYuen/client-go/v2/kv"
    53  	"github.com/KinWaiYuen/client-go/v2/metrics"
    54  	"github.com/KinWaiYuen/client-go/v2/oracle"
    55  	"github.com/KinWaiYuen/client-go/v2/tikvrpc"
    56  	"github.com/KinWaiYuen/client-go/v2/txnkv/txnlock"
    57  	"github.com/KinWaiYuen/client-go/v2/txnkv/txnutil"
    58  	"github.com/KinWaiYuen/client-go/v2/util"
    59  	"github.com/opentracing/opentracing-go"
    60  	"github.com/pingcap/errors"
    61  	"github.com/pingcap/failpoint"
    62  	"github.com/pingcap/kvproto/pkg/kvrpcpb"
    63  	"github.com/pingcap/kvproto/pkg/metapb"
    64  	"go.uber.org/zap"
    65  )
    66  
    67  const (
    68  	defaultScanBatchSize = 256
    69  	batchGetSize         = 5120
    70  	maxTimestamp         = math.MaxUint64
    71  )
    72  
    73  // IsoLevel is the transaction's isolation level.
    74  type IsoLevel kvrpcpb.IsolationLevel
    75  
    76  const (
    77  	// SI stands for 'snapshot isolation'.
    78  	SI IsoLevel = IsoLevel(kvrpcpb.IsolationLevel_SI)
    79  	// RC stands for 'read committed'.
    80  	RC IsoLevel = IsoLevel(kvrpcpb.IsolationLevel_RC)
    81  )
    82  
    83  // ToPB converts isolation level to wire type.
    84  func (l IsoLevel) ToPB() kvrpcpb.IsolationLevel {
    85  	return kvrpcpb.IsolationLevel(l)
    86  }
    87  
    88  type kvstore interface {
    89  	CheckVisibility(startTime uint64) error
    90  	// GetRegionCache gets the RegionCache.
    91  	GetRegionCache() *locate.RegionCache
    92  	GetLockResolver() *txnlock.LockResolver
    93  	GetTiKVClient() (client client.Client)
    94  
    95  	// SendReq sends a request to TiKV.
    96  	SendReq(bo *retry.Backoffer, req *tikvrpc.Request, regionID locate.RegionVerID, timeout time.Duration) (*tikvrpc.Response, error)
    97  	// GetOracle gets a timestamp oracle client.
    98  	GetOracle() oracle.Oracle
    99  }
   100  
   101  // KVSnapshot implements the tidbkv.Snapshot interface.
   102  type KVSnapshot struct {
   103  	store           kvstore
   104  	version         uint64
   105  	isolationLevel  IsoLevel
   106  	priority        txnutil.Priority
   107  	notFillCache    bool
   108  	keyOnly         bool
   109  	vars            *kv.Variables
   110  	replicaReadSeed uint32
   111  	resolvedLocks   util.TSSet
   112  	scanBatchSize   int
   113  
   114  	// Cache the result of BatchGet.
   115  	// The invariant is that calling BatchGet multiple times with the same start ts
   116  	// must return the same result.
   117  	// NOTE: The representation here differs from the BatchGet API.
   118  	// cached uses len(value) == 0 to represent a key-value entry that doesn't exist (a reliable truth from TiKV).
   119  	// In the BatchGet API, non-existence is represented by omitting the key-value entry instead.
   120  	// It's OK as long as there are no zero-byte values in the protocol.
   121  	mu struct {
   122  		sync.RWMutex
   123  		hitCnt           int64
   124  		cached           map[string][]byte
   125  		cachedSize       int
   126  		stats            *SnapshotRuntimeStats
   127  		replicaRead      kv.ReplicaReadType
   128  		taskID           uint64
   129  		isStaleness      bool
   130  		readReplicaScope string
   131  		// matchStoreLabels indicates the labels that the target store should match
   132  		matchStoreLabels []*metapb.StoreLabel
   133  	}
   134  	sampleStep uint32
   135  	// resourceGroupTag is used to set the resource group tag of kv requests.
   136  	resourceGroupTag []byte
   137  }
   138  
   139  // NewTiKVSnapshot creates a snapshot of a TiKV store.
   140  func NewTiKVSnapshot(store kvstore, ts uint64, replicaReadSeed uint32) *KVSnapshot {
   141  	// Sanity check for snapshot version.
   142  	if ts >= math.MaxInt64 && ts != math.MaxUint64 {
   143  		err := errors.Errorf("try to get snapshot with a large ts %d", ts)
   144  		panic(err)
   145  	}
   146  	return &KVSnapshot{
   147  		store:           store,
   148  		version:         ts,
   149  		scanBatchSize:   defaultScanBatchSize,
   150  		priority:        txnutil.PriorityNormal,
   151  		vars:            kv.DefaultVars,
   152  		replicaReadSeed: replicaReadSeed,
   153  	}
   154  }
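
         // The sketch below is an illustrative, hypothetical helper (not part of the
         // original API). It shows one way a caller might fetch a start timestamp from
         // the store's oracle and build a snapshot with NewTiKVSnapshot; the use of
         // oracle.GlobalTxnScope and a zero replicaReadSeed are assumptions.
         func exampleNewSnapshotAtNow(ctx context.Context, store kvstore) (*KVSnapshot, error) {
         	// Fetch a fresh timestamp; the snapshot observes data committed before it.
         	ts, err := store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
         	if err != nil {
         		return nil, errors.Trace(err)
         	}
         	return NewTiKVSnapshot(store, ts, 0), nil
         }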
   155  
   156  const batchGetMaxBackoff = 600000 // 10 minutes
   157  
   158  // SetSnapshotTS resets the timestamp for reads.
   159  func (s *KVSnapshot) SetSnapshotTS(ts uint64) {
   160  	// Sanity check for snapshot version.
   161  	if ts >= math.MaxInt64 && ts != math.MaxUint64 {
   162  		err := errors.Errorf("try to get snapshot with a large ts %d", ts)
   163  		panic(err)
   164  	}
   165  	// Invalidate the cache if the snapshotTS changes.
   166  	s.version = ts
   167  	s.mu.Lock()
   168  	s.mu.cached = nil
   169  	s.mu.Unlock()
   170  	// Also clear the pushed minCommitTS information.
   171  	s.resolvedLocks = util.TSSet{}
   172  }
   173  
   174  // BatchGet gets the values of all given keys from the kv-server and returns a map of key/value pairs.
   175  // The map will not contain nonexistent keys.
   176  // NOTE: Don't modify keys. Some code relies on the order of keys.
   177  func (s *KVSnapshot) BatchGet(ctx context.Context, keys [][]byte) (map[string][]byte, error) {
   178  	// Check the cached value first.
   179  	m := make(map[string][]byte)
   180  	s.mu.RLock()
   181  	if s.mu.cached != nil {
   182  		tmp := make([][]byte, 0, len(keys))
   183  		for _, key := range keys {
   184  			if val, ok := s.mu.cached[string(key)]; ok {
   185  				atomic.AddInt64(&s.mu.hitCnt, 1)
   186  				if len(val) > 0 {
   187  					m[string(key)] = val
   188  				}
   189  			} else {
   190  				tmp = append(tmp, key)
   191  			}
   192  		}
   193  		keys = tmp
   194  	}
   195  	s.mu.RUnlock()
   196  
   197  	if len(keys) == 0 {
   198  		return m, nil
   199  	}
   200  
   201  	ctx = context.WithValue(ctx, retry.TxnStartKey, s.version)
   202  	bo := retry.NewBackofferWithVars(ctx, batchGetMaxBackoff, s.vars)
   203  
   204  	// Create a map to collect key-values from region servers.
   205  	var mu sync.Mutex
   206  	err := s.batchGetKeysByRegions(bo, keys, func(k, v []byte) {
   207  		if len(v) == 0 {
   208  			return
   209  		}
   210  
   211  		mu.Lock()
   212  		m[string(k)] = v
   213  		mu.Unlock()
   214  	})
   215  	s.recordBackoffInfo(bo)
   216  	if err != nil {
   217  		return nil, errors.Trace(err)
   218  	}
   219  
   220  	err = s.store.CheckVisibility(s.version)
   221  	if err != nil {
   222  		return nil, errors.Trace(err)
   223  	}
   224  
   225  	// Update the cache.
   226  	s.mu.Lock()
   227  	if s.mu.cached == nil {
   228  		s.mu.cached = make(map[string][]byte, len(m))
   229  	}
   230  	for _, key := range keys {
   231  		val := m[string(key)]
   232  		s.mu.cachedSize += len(key) + len(val)
   233  		s.mu.cached[string(key)] = val
   234  	}
   235  
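         	// If the cached data exceeds the 10 GiB limit below, evict entries that are
         	// not needed by the current BatchGet until the size drops under the limit.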
   236  	const cachedSizeLimit = 10 << 30
   237  	if s.mu.cachedSize >= cachedSizeLimit {
   238  		for k, v := range s.mu.cached {
   239  			if _, needed := m[k]; needed {
   240  				continue
   241  			}
   242  			delete(s.mu.cached, k)
   243  			s.mu.cachedSize -= len(k) + len(v)
   244  			if s.mu.cachedSize < cachedSizeLimit {
   245  				break
   246  			}
   247  		}
   248  	}
   249  	s.mu.Unlock()
   250  
   251  	return m, nil
   252  }
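
         // Illustrative sketch (hypothetical helper, not part of the original API):
         // since BatchGet omits nonexistent keys from the returned map, a caller that
         // needs explicit presence information can derive it from map membership.
         func exampleBatchGetPresence(ctx context.Context, s *KVSnapshot, keys [][]byte) (map[string]bool, error) {
         	vals, err := s.BatchGet(ctx, keys)
         	if err != nil {
         		return nil, errors.Trace(err)
         	}
         	present := make(map[string]bool, len(keys))
         	for _, k := range keys {
         		_, ok := vals[string(k)] // a missing entry means the key does not exist
         		present[string(k)] = ok
         	}
         	return present, nil
         }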
   253  
   254  type batchKeys struct {
   255  	region locate.RegionVerID
   256  	keys   [][]byte
   257  }
   258  
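         // relocate re-looks up the region that covers b.keys[0] and reports whether
         // all keys in the batch still fall into that single region.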
   259  func (b *batchKeys) relocate(bo *retry.Backoffer, c *locate.RegionCache) (bool, error) {
   260  	loc, err := c.LocateKey(bo, b.keys[0])
   261  	if err != nil {
   262  		return false, errors.Trace(err)
   263  	}
   264  	// b.keys are not sorted, so we have to check every key.
   265  	for i := 1; i < len(b.keys); i++ {
   266  		if !loc.Contains(b.keys[i]) {
   267  			return false, nil
   268  		}
   269  	}
   270  	b.region = loc.Region
   271  	return true, nil
   272  }
   273  
   274  // appendBatchKeysBySize appends keys to b. It may split the keys to make
   275  // sure each batch's size does not exceed the limit.
   276  func appendBatchKeysBySize(b []batchKeys, region locate.RegionVerID, keys [][]byte, sizeFn func([]byte) int, limit int) []batchKeys {
   277  	var start, end int
   278  	for start = 0; start < len(keys); start = end {
   279  		var size int
   280  		for end = start; end < len(keys) && size < limit; end++ {
   281  			size += sizeFn(keys[end])
   282  		}
   283  		b = append(b, batchKeys{
   284  			region: region,
   285  			keys:   keys[start:end],
   286  		})
   287  	}
   288  	return b
   289  }
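
         // A minimal sketch (hypothetical, for illustration only) of how the batching
         // helper above is used: counting every key as size 1 caps each batch at
         // batchGetSize keys, which mirrors what batchGetKeysByRegions does.
         func exampleSplitIntoBatches(region locate.RegionVerID, keys [][]byte) []batchKeys {
         	return appendBatchKeysBySize(nil, region, keys, func([]byte) int { return 1 }, batchGetSize)
         }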
   290  
   291  func (s *KVSnapshot) batchGetKeysByRegions(bo *retry.Backoffer, keys [][]byte, collectF func(k, v []byte)) error {
   292  	defer func(start time.Time) {
   293  		metrics.TxnCmdHistogramWithBatchGet.Observe(time.Since(start).Seconds())
   294  	}(time.Now())
   295  	groups, _, err := s.store.GetRegionCache().GroupKeysByRegion(bo, keys, nil)
   296  	if err != nil {
   297  		return errors.Trace(err)
   298  	}
   299  
   300  	metrics.TxnRegionsNumHistogramWithSnapshot.Observe(float64(len(groups)))
   301  
   302  	var batches []batchKeys
   303  	for id, g := range groups {
   304  		batches = appendBatchKeysBySize(batches, id, g, func([]byte) int { return 1 }, batchGetSize)
   305  	}
   306  
   307  	if len(batches) == 0 {
   308  		return nil
   309  	}
   310  	if len(batches) == 1 {
   311  		return errors.Trace(s.batchGetSingleRegion(bo, batches[0], collectF))
   312  	}
   313  	ch := make(chan error)
   314  	for _, batch1 := range batches {
   315  		batch := batch1
   316  		go func() {
   317  			backoffer, cancel := bo.Fork()
   318  			defer cancel()
   319  			ch <- s.batchGetSingleRegion(backoffer, batch, collectF)
   320  		}()
   321  	}
   322  	for i := 0; i < len(batches); i++ {
   323  		if e := <-ch; e != nil {
   324  			logutil.BgLogger().Debug("snapshot batchGet failed",
   325  				zap.Error(e),
   326  				zap.Uint64("txnStartTS", s.version))
   327  			err = e
   328  		}
   329  	}
   330  	return errors.Trace(err)
   331  }
   332  
   333  func (s *KVSnapshot) batchGetSingleRegion(bo *retry.Backoffer, batch batchKeys, collectF func(k, v []byte)) error {
   334  	cli := NewClientHelper(s.store, &s.resolvedLocks, false)
   335  	s.mu.RLock()
   336  	if s.mu.stats != nil {
   337  		cli.Stats = make(map[tikvrpc.CmdType]*locate.RPCRuntimeStats)
   338  		defer func() {
   339  			s.mergeRegionRequestStats(cli.Stats)
   340  		}()
   341  	}
   342  	s.mu.RUnlock()
   343  
   344  	pending := batch.keys
   345  	for {
   346  		s.mu.RLock()
   347  		req := tikvrpc.NewReplicaReadRequest(tikvrpc.CmdBatchGet, &kvrpcpb.BatchGetRequest{
   348  			Keys:    pending,
   349  			Version: s.version,
   350  		}, s.mu.replicaRead, &s.replicaReadSeed, kvrpcpb.Context{
   351  			Priority:         s.priority.ToPB(),
   352  			NotFillCache:     s.notFillCache,
   353  			TaskId:           s.mu.taskID,
   354  			ResourceGroupTag: s.resourceGroupTag,
   355  		})
   356  		scope := s.mu.readReplicaScope
   357  		isStaleness := s.mu.isStaleness
   358  		matchStoreLabels := s.mu.matchStoreLabels
   359  		s.mu.RUnlock()
   360  		req.TxnScope = scope
   361  		req.ReadReplicaScope = scope
   362  		if isStaleness {
   363  			req.EnableStaleRead()
   364  		}
   365  		ops := make([]locate.StoreSelectorOption, 0, 2)
   366  		if len(matchStoreLabels) > 0 {
   367  			ops = append(ops, locate.WithMatchLabels(matchStoreLabels))
   368  		}
   369  		resp, _, _, err := cli.SendReqCtx(bo, req, batch.region, client.ReadTimeoutMedium, tikvrpc.TiKV, "", ops...)
   370  		if err != nil {
   371  			return errors.Trace(err)
   372  		}
   373  		regionErr, err := resp.GetRegionError()
   374  		if err != nil {
   375  			return errors.Trace(err)
   376  		}
   377  		if regionErr != nil {
   378  			// For other region errors and the fake region error, back off because
   379  			// something is wrong.
   380  			// For a real EpochNotMatch error, don't back off.
   381  			if regionErr.GetEpochNotMatch() == nil || locate.IsFakeRegionError(regionErr) {
   382  				err = bo.Backoff(retry.BoRegionMiss, errors.New(regionErr.String()))
   383  				if err != nil {
   384  					return errors.Trace(err)
   385  				}
   386  			}
   387  			same, err := batch.relocate(bo, cli.regionCache)
   388  			if err != nil {
   389  				return errors.Trace(err)
   390  			}
   391  			if same {
   392  				continue
   393  			}
   394  			err = s.batchGetKeysByRegions(bo, pending, collectF)
   395  			return errors.Trace(err)
   396  		}
   397  		if resp.Resp == nil {
   398  			return errors.Trace(tikverr.ErrBodyMissing)
   399  		}
   400  		batchGetResp := resp.Resp.(*kvrpcpb.BatchGetResponse)
   401  		var (
   402  			lockedKeys [][]byte
   403  			locks      []*txnlock.Lock
   404  		)
   405  		if keyErr := batchGetResp.GetError(); keyErr != nil {
   406  			// If a response-level error happens, skip reading pairs.
   407  			lock, err := txnlock.ExtractLockFromKeyErr(keyErr)
   408  			if err != nil {
   409  				return errors.Trace(err)
   410  			}
   411  			lockedKeys = append(lockedKeys, lock.Key)
   412  			locks = append(locks, lock)
   413  		} else {
   414  			for _, pair := range batchGetResp.Pairs {
   415  				keyErr := pair.GetError()
   416  				if keyErr == nil {
   417  					collectF(pair.GetKey(), pair.GetValue())
   418  					continue
   419  				}
   420  				lock, err := txnlock.ExtractLockFromKeyErr(keyErr)
   421  				if err != nil {
   422  					return errors.Trace(err)
   423  				}
   424  				lockedKeys = append(lockedKeys, lock.Key)
   425  				locks = append(locks, lock)
   426  			}
   427  		}
   428  		if batchGetResp.ExecDetailsV2 != nil {
   429  			readKeys := len(batchGetResp.Pairs)
   430  			readTime := float64(batchGetResp.ExecDetailsV2.GetTimeDetail().GetKvReadWallTimeMs() / 1000)
   431  			metrics.ObserveReadSLI(uint64(readKeys), readTime)
   432  			s.mergeExecDetail(batchGetResp.ExecDetailsV2)
   433  		}
   434  		if len(lockedKeys) > 0 {
   435  			msBeforeExpired, err := cli.ResolveLocks(bo, s.version, locks)
   436  			if err != nil {
   437  				return errors.Trace(err)
   438  			}
   439  			if msBeforeExpired > 0 {
   440  				err = bo.BackoffWithMaxSleepTxnLockFast(int(msBeforeExpired), errors.Errorf("batchGet lockedKeys: %d", len(lockedKeys)))
   441  				if err != nil {
   442  					return errors.Trace(err)
   443  				}
   444  			}
   445  			// Only reduce pending keys when there is no response-level error. Otherwise,
   446  			// lockedKeys may be incomplete.
   447  			if batchGetResp.GetError() == nil {
   448  				pending = lockedKeys
   449  			}
   450  			continue
   451  		}
   452  		return nil
   453  	}
   454  }
   455  
   456  const getMaxBackoff = 600000 // 10 minutes
   457  
   458  // Get gets the value for key k from snapshot.
   459  func (s *KVSnapshot) Get(ctx context.Context, k []byte) ([]byte, error) {
   460  	defer func(start time.Time) {
   461  		metrics.TxnCmdHistogramWithGet.Observe(time.Since(start).Seconds())
   462  	}(time.Now())
   463  
   464  	ctx = context.WithValue(ctx, retry.TxnStartKey, s.version)
   465  	bo := retry.NewBackofferWithVars(ctx, getMaxBackoff, s.vars)
   466  	val, err := s.get(ctx, bo, k)
   467  	s.recordBackoffInfo(bo)
   468  	if err != nil {
   469  		return nil, errors.Trace(err)
   470  	}
   471  	err = s.store.CheckVisibility(s.version)
   472  	if err != nil {
   473  		return nil, errors.Trace(err)
   474  	}
   475  
   476  	if len(val) == 0 {
   477  		return nil, tikverr.ErrNotExist
   478  	}
   479  	return val, nil
   480  }
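
         // Illustrative sketch (hypothetical helper): Get reports a missing key with
         // tikverr.ErrNotExist, so a caller can translate that into an "ok" flag
         // instead of an error.
         func exampleGetOrNotExist(ctx context.Context, s *KVSnapshot, key []byte) ([]byte, bool, error) {
         	val, err := s.Get(ctx, key)
         	if err == tikverr.ErrNotExist {
         		return nil, false, nil
         	}
         	if err != nil {
         		return nil, false, errors.Trace(err)
         	}
         	return val, true, nil
         }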
   481  
   482  func (s *KVSnapshot) get(ctx context.Context, bo *retry.Backoffer, k []byte) ([]byte, error) {
   483  	// Check the cached values first.
   484  	s.mu.RLock()
   485  	if s.mu.cached != nil {
   486  		if value, ok := s.mu.cached[string(k)]; ok {
   487  			atomic.AddInt64(&s.mu.hitCnt, 1)
   488  			s.mu.RUnlock()
   489  			return value, nil
   490  		}
   491  	}
   492  	s.mu.RUnlock()
   493  	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
   494  		span1 := span.Tracer().StartSpan("tikvSnapshot.get", opentracing.ChildOf(span.Context()))
   495  		defer span1.Finish()
   496  		opentracing.ContextWithSpan(ctx, span1)
   497  	}
   498  	failpoint.Inject("snapshot-get-cache-fail", func(_ failpoint.Value) {
   499  		if bo.GetCtx().Value("TestSnapshotCache") != nil {
   500  			panic("cache miss")
   501  		}
   502  	})
   503  
   504  	cli := NewClientHelper(s.store, &s.resolvedLocks, true)
   505  
   506  	s.mu.RLock()
   507  	if s.mu.stats != nil {
   508  		cli.Stats = make(map[tikvrpc.CmdType]*locate.RPCRuntimeStats)
   509  		defer func() {
   510  			s.mergeRegionRequestStats(cli.Stats)
   511  		}()
   512  	}
   513  	req := tikvrpc.NewReplicaReadRequest(tikvrpc.CmdGet,
   514  		&kvrpcpb.GetRequest{
   515  			Key:     k,
   516  			Version: s.version,
   517  		}, s.mu.replicaRead, &s.replicaReadSeed, kvrpcpb.Context{
   518  			Priority:         s.priority.ToPB(),
   519  			NotFillCache:     s.notFillCache,
   520  			TaskId:           s.mu.taskID,
   521  			ResourceGroupTag: s.resourceGroupTag,
   522  		})
   523  	isStaleness := s.mu.isStaleness
   524  	matchStoreLabels := s.mu.matchStoreLabels
   525  	scope := s.mu.readReplicaScope
   526  	s.mu.RUnlock()
   527  	req.TxnScope = scope
   528  	req.ReadReplicaScope = scope
   529  	var ops []locate.StoreSelectorOption
   530  	if isStaleness {
   531  		req.EnableStaleRead()
   532  	}
   533  	if len(matchStoreLabels) > 0 {
   534  		ops = append(ops, locate.WithMatchLabels(matchStoreLabels))
   535  	}
   536  
   537  	var firstLock *txnlock.Lock
   538  	for {
   539  		util.EvalFailpoint("beforeSendPointGet")
   540  		loc, err := s.store.GetRegionCache().LocateKey(bo, k)
   541  		if err != nil {
   542  			return nil, errors.Trace(err)
   543  		}
   544  		resp, _, _, err := cli.SendReqCtx(bo, req, loc.Region, client.ReadTimeoutShort, tikvrpc.TiKV, "", ops...)
   545  		if err != nil {
   546  			return nil, errors.Trace(err)
   547  		}
   548  		regionErr, err := resp.GetRegionError()
   549  		if err != nil {
   550  			return nil, errors.Trace(err)
   551  		}
   552  		if regionErr != nil {
   553  			// For other region errors and the fake region error, back off because
   554  			// something is wrong.
   555  			// For a real EpochNotMatch error, don't back off.
   556  			if regionErr.GetEpochNotMatch() == nil || locate.IsFakeRegionError(regionErr) {
   557  				err = bo.Backoff(retry.BoRegionMiss, errors.New(regionErr.String()))
   558  				if err != nil {
   559  					return nil, errors.Trace(err)
   560  				}
   561  			}
   562  			continue
   563  		}
   564  		if resp.Resp == nil {
   565  			return nil, errors.Trace(tikverr.ErrBodyMissing)
   566  		}
   567  		cmdGetResp := resp.Resp.(*kvrpcpb.GetResponse)
   568  		if cmdGetResp.ExecDetailsV2 != nil {
   569  			readKeys := len(cmdGetResp.Value)
   570  			readTime := float64(cmdGetResp.ExecDetailsV2.GetTimeDetail().GetKvReadWallTimeMs() / 1000)
   571  			metrics.ObserveReadSLI(uint64(readKeys), readTime)
   572  			s.mergeExecDetail(cmdGetResp.ExecDetailsV2)
   573  		}
   574  		val := cmdGetResp.GetValue()
   575  		if keyErr := cmdGetResp.GetError(); keyErr != nil {
   576  			lock, err := txnlock.ExtractLockFromKeyErr(keyErr)
   577  			if err != nil {
   578  				return nil, errors.Trace(err)
   579  			}
   580  			if firstLock == nil {
   581  				firstLock = lock
   582  			} else if s.version == maxTimestamp && firstLock.TxnID != lock.TxnID {
   583  				// If it is an autocommit point get, it needs to be blocked only
   584  				// by the first lock it meets. During retries, if the encountered
   585  				// lock is different from the first one, we can omit it.
   586  				cli.resolvedLocks.Put(lock.TxnID)
   587  				continue
   588  			}
   589  
   590  			msBeforeExpired, err := cli.ResolveLocks(bo, s.version, []*txnlock.Lock{lock})
   591  			if err != nil {
   592  				return nil, errors.Trace(err)
   593  			}
   594  			if msBeforeExpired > 0 {
   595  				err = bo.BackoffWithMaxSleepTxnLockFast(int(msBeforeExpired), errors.New(keyErr.String()))
   596  				if err != nil {
   597  					return nil, errors.Trace(err)
   598  				}
   599  			}
   600  			continue
   601  		}
   602  		return val, nil
   603  	}
   604  }
   605  
   606  func (s *KVSnapshot) mergeExecDetail(detail *kvrpcpb.ExecDetailsV2) {
   607  	s.mu.Lock()
   608  	defer s.mu.Unlock()
   609  	if detail == nil || s.mu.stats == nil {
   610  		return
   611  	}
   612  	if s.mu.stats.scanDetail == nil {
   613  		s.mu.stats.scanDetail = &util.ScanDetail{}
   614  	}
   615  	if s.mu.stats.timeDetail == nil {
   616  		s.mu.stats.timeDetail = &util.TimeDetail{}
   617  	}
   618  	s.mu.stats.scanDetail.MergeFromScanDetailV2(detail.ScanDetailV2)
   619  	s.mu.stats.timeDetail.MergeFromTimeDetail(detail.TimeDetail)
   620  }
   621  
   622  // Iter returns an iterator over the key-value pairs in the range [k, upperBound).
   623  func (s *KVSnapshot) Iter(k []byte, upperBound []byte) (unionstore.Iterator, error) {
   624  	scanner, err := newScanner(s, k, upperBound, s.scanBatchSize, false)
   625  	return scanner, errors.Trace(err)
   626  }
   627  
   628  // IterReverse creates a reversed Iterator positioned on the first entry whose key is less than k.
   629  func (s *KVSnapshot) IterReverse(k []byte) (unionstore.Iterator, error) {
   630  	scanner, err := newScanner(s, nil, k, s.scanBatchSize, true)
   631  	return scanner, errors.Trace(err)
   632  }
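
         // A minimal sketch (hypothetical helper): collecting a bounded range through
         // the snapshot's Iter. It assumes the returned unionstore.Iterator exposes the
         // usual Valid/Key/Value/Next/Close methods.
         func exampleCollectRange(s *KVSnapshot, start, upperBound []byte) (map[string][]byte, error) {
         	it, err := s.Iter(start, upperBound)
         	if err != nil {
         		return nil, errors.Trace(err)
         	}
         	defer it.Close()
         	result := make(map[string][]byte)
         	for it.Valid() {
         		result[string(it.Key())] = it.Value()
         		if err := it.Next(); err != nil {
         			return nil, errors.Trace(err)
         		}
         	}
         	return result, nil
         }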
   633  
   634  // SetNotFillCache indicates whether tikv should skip filling the cache when
   635  // loading data.
   636  func (s *KVSnapshot) SetNotFillCache(b bool) {
   637  	s.notFillCache = b
   638  }
   639  
   640  // SetKeyOnly indicates if tikv can return only keys.
   641  func (s *KVSnapshot) SetKeyOnly(b bool) {
   642  	s.keyOnly = b
   643  }
   644  
   645  // SetScanBatchSize sets the scan batchSize used to scan data from tikv.
   646  func (s *KVSnapshot) SetScanBatchSize(batchSize int) {
   647  	s.scanBatchSize = batchSize
   648  }
   649  
   650  // SetReplicaRead sets up the replica read type.
   651  func (s *KVSnapshot) SetReplicaRead(readType kv.ReplicaReadType) {
   652  	s.mu.Lock()
   653  	defer s.mu.Unlock()
   654  	s.mu.replicaRead = readType
   655  }
   656  
   657  // SetIsolationLevel sets the isolation level used to scan data from tikv.
   658  func (s *KVSnapshot) SetIsolationLevel(level IsoLevel) {
   659  	s.isolationLevel = level
   660  }
   661  
   662  // SetSampleStep makes the scan skip 'step - 1' keys after each returned key.
   663  func (s *KVSnapshot) SetSampleStep(step uint32) {
   664  	s.sampleStep = step
   665  }
   666  
   667  // SetPriority sets the priority for tikv to execute commands.
   668  func (s *KVSnapshot) SetPriority(pri txnutil.Priority) {
   669  	s.priority = pri
   670  }
   671  
   672  // SetTaskID marks the current task's unique ID to allow TiKV to schedule
   673  // tasks more fairly.
   674  func (s *KVSnapshot) SetTaskID(id uint64) {
   675  	s.mu.Lock()
   676  	defer s.mu.Unlock()
   677  	s.mu.taskID = id
   678  }
   679  
   680  // SetRuntimeStats sets the stats to collect runtime statistics.
   681  // Set it to nil to clear stored stats.
   682  func (s *KVSnapshot) SetRuntimeStats(stats *SnapshotRuntimeStats) {
   683  	s.mu.Lock()
   684  	defer s.mu.Unlock()
   685  	s.mu.stats = stats
   686  }
   687  
   688  // SetTxnScope is the same as SetReadReplicaScope; it is kept for compatibility for now.
   689  func (s *KVSnapshot) SetTxnScope(scope string) {
   690  	s.mu.Lock()
   691  	defer s.mu.Unlock()
   692  	s.mu.readReplicaScope = scope
   693  }
   694  
   695  // SetReadReplicaScope sets the read replica scope.
   696  func (s *KVSnapshot) SetReadReplicaScope(scope string) {
   697  	s.mu.Lock()
   698  	defer s.mu.Unlock()
   699  	s.mu.readReplicaScope = scope
   700  }
   701  
   702  // SetIsStatenessReadOnly indicates whether the transaction is a staleness read-only transaction.
   703  func (s *KVSnapshot) SetIsStatenessReadOnly(b bool) {
   704  	s.mu.Lock()
   705  	defer s.mu.Unlock()
   706  	s.mu.isStaleness = b
   707  }
   708  
   709  // SetMatchStoreLabels sets up labels to filter target stores.
   710  func (s *KVSnapshot) SetMatchStoreLabels(labels []*metapb.StoreLabel) {
   711  	s.mu.Lock()
   712  	defer s.mu.Unlock()
   713  	s.mu.matchStoreLabels = labels
   714  }
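
         // Illustrative sketch (hypothetical helper): a typical combination of the
         // setters above for a stale read served by follower replicas matching the
         // given store labels. kv.ReplicaReadFollower is assumed to be the follower
         // read type defined in the kv package.
         func exampleConfigureStaleFollowerRead(s *KVSnapshot, labels []*metapb.StoreLabel) {
         	s.SetIsStatenessReadOnly(true)           // mark the snapshot as a staleness read-only one
         	s.SetMatchStoreLabels(labels)            // restrict reads to stores carrying these labels
         	s.SetReplicaRead(kv.ReplicaReadFollower) // prefer follower replicas over the leader
         }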
   715  
   716  // SetResourceGroupTag sets the resource group tag of the kv request.
   717  func (s *KVSnapshot) SetResourceGroupTag(tag []byte) {
   718  	s.resourceGroupTag = tag
   719  }
   720  
   721  // SnapCacheHitCount gets the snapshot cache hit count. Only for test.
   722  func (s *KVSnapshot) SnapCacheHitCount() int {
   723  	return int(atomic.LoadInt64(&s.mu.hitCnt))
   724  }
   725  
   726  // SnapCacheSize gets the snapshot cache size. Only for test.
   727  func (s *KVSnapshot) SnapCacheSize() int {
   728  	s.mu.RLock()
   729  	defer s.mu.RUnlock()
   730  	return len(s.mu.cached)
   731  }
   732  
   733  // SetVars sets variables to the transaction.
   734  func (s *KVSnapshot) SetVars(vars *kv.Variables) {
   735  	s.vars = vars
   736  }
   737  
   738  func (s *KVSnapshot) recordBackoffInfo(bo *retry.Backoffer) {
   739  	s.mu.RLock()
   740  	if s.mu.stats == nil || bo.GetTotalSleep() == 0 {
   741  		s.mu.RUnlock()
   742  		return
   743  	}
   744  	s.mu.RUnlock()
   745  	s.mu.Lock()
   746  	defer s.mu.Unlock()
   747  	if s.mu.stats == nil {
   748  		return
   749  	}
   750  	if s.mu.stats.backoffSleepMS == nil {
   751  		s.mu.stats.backoffSleepMS = bo.GetBackoffSleepMS()
   752  		s.mu.stats.backoffTimes = bo.GetBackoffTimes()
   753  		return
   754  	}
   755  	for k, v := range bo.GetBackoffSleepMS() {
   756  		s.mu.stats.backoffSleepMS[k] += v
   757  	}
   758  	for k, v := range bo.GetBackoffTimes() {
   759  		s.mu.stats.backoffTimes[k] += v
   760  	}
   761  }
   762  
   763  func (s *KVSnapshot) mergeRegionRequestStats(stats map[tikvrpc.CmdType]*locate.RPCRuntimeStats) {
   764  	s.mu.Lock()
   765  	defer s.mu.Unlock()
   766  	if s.mu.stats == nil {
   767  		return
   768  	}
   769  	if s.mu.stats.rpcStats.Stats == nil {
   770  		s.mu.stats.rpcStats.Stats = stats
   771  		return
   772  	}
   773  	for k, v := range stats {
   774  		stat, ok := s.mu.stats.rpcStats.Stats[k]
   775  		if !ok {
   776  			s.mu.stats.rpcStats.Stats[k] = v
   777  			continue
   778  		}
   779  		stat.Count += v.Count
   780  		stat.Consume += v.Consume
   781  	}
   782  }
   783  
   784  // SnapshotRuntimeStats records the runtime stats of a snapshot.
   785  type SnapshotRuntimeStats struct {
   786  	rpcStats       locate.RegionRequestRuntimeStats
   787  	backoffSleepMS map[string]int
   788  	backoffTimes   map[string]int
   789  	scanDetail     *util.ScanDetail
   790  	timeDetail     *util.TimeDetail
   791  }
   792  
   793  // Clone implements the RuntimeStats interface.
   794  func (rs *SnapshotRuntimeStats) Clone() *SnapshotRuntimeStats {
   795  	newRs := SnapshotRuntimeStats{rpcStats: locate.NewRegionRequestRuntimeStats()}
   796  	if rs.rpcStats.Stats != nil {
   797  		for k, v := range rs.rpcStats.Stats {
   798  			newRs.rpcStats.Stats[k] = v
   799  		}
   800  	}
   801  	if len(rs.backoffSleepMS) > 0 {
   802  		newRs.backoffSleepMS = make(map[string]int)
   803  		newRs.backoffTimes = make(map[string]int)
   804  		for k, v := range rs.backoffSleepMS {
   805  			newRs.backoffSleepMS[k] += v
   806  		}
   807  		for k, v := range rs.backoffTimes {
   808  			newRs.backoffTimes[k] += v
   809  		}
   810  	}
   811  	return &newRs
   812  }
   813  
   814  // Merge implements the RuntimeStats interface.
   815  func (rs *SnapshotRuntimeStats) Merge(other *SnapshotRuntimeStats) {
   816  	if other.rpcStats.Stats != nil {
   817  		if rs.rpcStats.Stats == nil {
   818  			rs.rpcStats.Stats = make(map[tikvrpc.CmdType]*locate.RPCRuntimeStats, len(other.rpcStats.Stats))
   819  		}
   820  		rs.rpcStats.Merge(other.rpcStats)
   821  	}
   822  	if len(other.backoffSleepMS) > 0 {
   823  		if rs.backoffSleepMS == nil {
   824  			rs.backoffSleepMS = make(map[string]int)
   825  		}
   826  		if rs.backoffTimes == nil {
   827  			rs.backoffTimes = make(map[string]int)
   828  		}
   829  		for k, v := range other.backoffSleepMS {
   830  			rs.backoffSleepMS[k] += v
   831  		}
   832  		for k, v := range other.backoffTimes {
   833  			rs.backoffTimes[k] += v
   834  		}
   835  	}
   836  }
   837  
   838  // String implements fmt.Stringer interface.
   839  func (rs *SnapshotRuntimeStats) String() string {
   840  	var buf bytes.Buffer
   841  	buf.WriteString(rs.rpcStats.String())
   842  	for k, v := range rs.backoffTimes {
   843  		if buf.Len() > 0 {
   844  			buf.WriteByte(',')
   845  		}
   846  		ms := rs.backoffSleepMS[k]
   847  		d := time.Duration(ms) * time.Millisecond
   848  		buf.WriteString(fmt.Sprintf("%s_backoff:{num:%d, total_time:%s}", k, v, util.FormatDuration(d)))
   849  	}
   850  	timeDetail := rs.timeDetail.String()
   851  	if timeDetail != "" {
   852  		buf.WriteString(", ")
   853  		buf.WriteString(timeDetail)
   854  	}
   855  	scanDetail := rs.scanDetail.String()
   856  	if scanDetail != "" {
   857  		buf.WriteString(", ")
   858  		buf.WriteString(scanDetail)
   859  	}
   860  	return buf.String()
   861  }
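
         // Illustrative sketch (hypothetical usage): attach a SnapshotRuntimeStats to a
         // snapshot, perform a read, and render the collected RPC/backoff details. The
         // zero value of SnapshotRuntimeStats is assumed to be usable here because the
         // merge paths above handle nil inner maps.
         func exampleCollectRuntimeStats(ctx context.Context, s *KVSnapshot, key []byte) (string, error) {
         	stats := &SnapshotRuntimeStats{}
         	s.SetRuntimeStats(stats)
         	defer s.SetRuntimeStats(nil) // clear the stored stats when done
         	if _, err := s.Get(ctx, key); err != nil && err != tikverr.ErrNotExist {
         		return "", errors.Trace(err)
         	}
         	return stats.String(), nil
         }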