github.com/KinWaiYuen/client-go/v2@v2.5.4/txnkv/transaction/pessimistic.go

// Copyright 2021 TiKV Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE: The code in this file is based on code from the
// TiDB project, licensed under the Apache License v 2.0
//
// https://github.com/pingcap/tidb/tree/cc5e161ac06827589c4966674597c137cc9e809c/store/tikv/pessimistic.go
//

// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package transaction

import (
    "encoding/hex"
    "math/rand"
    "strings"
    "sync/atomic"
    "time"

    tikverr "github.com/KinWaiYuen/client-go/v2/error"
    "github.com/KinWaiYuen/client-go/v2/internal/client"
    "github.com/KinWaiYuen/client-go/v2/internal/locate"
    "github.com/KinWaiYuen/client-go/v2/internal/logutil"
    "github.com/KinWaiYuen/client-go/v2/internal/retry"
    "github.com/KinWaiYuen/client-go/v2/kv"
    "github.com/KinWaiYuen/client-go/v2/metrics"
    "github.com/KinWaiYuen/client-go/v2/tikvrpc"
    "github.com/KinWaiYuen/client-go/v2/txnkv/txnlock"
    "github.com/KinWaiYuen/client-go/v2/util"
    "github.com/pingcap/errors"
    "github.com/pingcap/kvproto/pkg/kvrpcpb"
    "github.com/prometheus/client_golang/prometheus"
    "go.uber.org/zap"
)

type actionPessimisticLock struct {
    *kv.LockCtx
}
type actionPessimisticRollback struct{}

var (
    _ twoPhaseCommitAction = actionPessimisticLock{}
    _ twoPhaseCommitAction = actionPessimisticRollback{}
)

func (actionPessimisticLock) String() string {
    return "pessimistic_lock"
}

func (actionPessimisticLock) tiKVTxnRegionsNumHistogram() prometheus.Observer {
    return metrics.TxnRegionsNumHistogramPessimisticLock
}

func (actionPessimisticRollback) String() string {
    return "pessimistic_rollback"
}

func (actionPessimisticRollback) tiKVTxnRegionsNumHistogram() prometheus.Observer {
    return metrics.TxnRegionsNumHistogramPessimisticRollback
}

func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *retry.Backoffer, batch batchMutations) error {
    m := batch.mutations
    mutations := make([]*kvrpcpb.Mutation, m.Len())
    for i := 0; i < m.Len(); i++ {
        mut := &kvrpcpb.Mutation{
            Op:  kvrpcpb.Op_PessimisticLock,
            Key: m.GetKey(i),
        }
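        // Descriptive note (added for clarity): when the transaction presumes the key does not
        // exist (e.g. it was written with the presume-key-not-exists flag, as for INSERT), or an
        // amend is adding an Op_Insert mutation, attach a NotExist assertion so TiKV can report
        // an AlreadyExist key error instead of silently locking an existing key.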
        if c.txn.us.HasPresumeKeyNotExists(m.GetKey(i)) || (c.doingAmend && m.GetOp(i) == kvrpcpb.Op_Insert) {
            mut.Assertion = kvrpcpb.Assertion_NotExist
        }
        mutations[i] = mut
    }
    elapsed := uint64(time.Since(c.txn.startTime) / time.Millisecond)
    ttl := elapsed + atomic.LoadUint64(&ManagedLockTTL)
    if _, err := util.EvalFailpoint("shortPessimisticLockTTL"); err == nil {
        ttl = 1
        keys := make([]string, 0, len(mutations))
        for _, m := range mutations {
            keys = append(keys, hex.EncodeToString(m.Key))
        }
        logutil.BgLogger().Info("[failpoint] injected lock ttl = 1 on pessimistic lock",
            zap.Uint64("txnStartTS", c.startTS), zap.Strings("keys", keys))
    }
    req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticLock, &kvrpcpb.PessimisticLockRequest{
        Mutations:    mutations,
        PrimaryLock:  c.primary(),
        StartVersion: c.startTS,
        ForUpdateTs:  c.forUpdateTS,
        LockTtl:      ttl,
        IsFirstLock:  c.isFirstLock,
        WaitTimeout:  action.LockWaitTime(),
        ReturnValues: action.ReturnValues,
        MinCommitTs:  c.forUpdateTS + 1,
    }, kvrpcpb.Context{Priority: c.priority, SyncLog: c.syncLog, ResourceGroupTag: action.LockCtx.ResourceGroupTag,
        MaxExecutionDurationMs: uint64(client.MaxWriteExecutionTime.Milliseconds())})
    lockWaitStartTime := action.WaitStartTime
    for {
        // If lockWaitTime is set, refine the request's `WaitTimeout` field based on the remaining timeout limit.
        if action.LockWaitTime() > 0 && action.LockWaitTime() != kv.LockAlwaysWait {
            timeLeft := action.LockWaitTime() - (time.Since(lockWaitStartTime)).Milliseconds()
            if timeLeft <= 0 {
                req.PessimisticLock().WaitTimeout = kv.LockNoWait
            } else {
                req.PessimisticLock().WaitTimeout = timeLeft
            }
        }
        if _, err := util.EvalFailpoint("PessimisticLockErrWriteConflict"); err == nil {
            time.Sleep(300 * time.Millisecond)
            return &tikverr.ErrWriteConflict{WriteConflict: nil}
        }
        startTime := time.Now()
        resp, err := c.store.SendReq(bo, req, batch.region, client.ReadTimeoutShort)
        if action.LockCtx.Stats != nil {
            atomic.AddInt64(&action.LockCtx.Stats.LockRPCTime, int64(time.Since(startTime)))
            atomic.AddInt64(&action.LockCtx.Stats.LockRPCCount, 1)
        }
        if err != nil {
            return errors.Trace(err)
        }
        regionErr, err := resp.GetRegionError()
        if err != nil {
            return errors.Trace(err)
        }
        if regionErr != nil {
            // For other region errors and the fake region error, backoff because
            // there's something wrong.
            // For the real EpochNotMatch error, don't backoff.
            if regionErr.GetEpochNotMatch() == nil || locate.IsFakeRegionError(regionErr) {
                err = bo.Backoff(retry.BoRegionMiss, errors.New(regionErr.String()))
                if err != nil {
                    return errors.Trace(err)
                }
            }
            same, err := batch.relocate(bo, c.store.GetRegionCache())
            if err != nil {
                return errors.Trace(err)
            }
            if same {
                continue
            }
            err = c.pessimisticLockMutations(bo, action.LockCtx, batch.mutations)
            return errors.Trace(err)
        }
        if resp.Resp == nil {
            return errors.Trace(tikverr.ErrBodyMissing)
        }
        lockResp := resp.Resp.(*kvrpcpb.PessimisticLockResponse)
        keyErrs := lockResp.GetErrors()
        if len(keyErrs) == 0 {
            if batch.isPrimary {
                // After locking the primary key, we should protect the primary lock from expiring
                // now in case locking the remaining keys takes a long time.
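                // Descriptive note (added for clarity): run here comes from the committer's
                // ttlManager; it starts a background keep-alive goroutine that periodically sends
                // TxnHeartBeat requests to TiKV to extend the primary lock's TTL until the
                // transaction commits or rolls back.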
                c.run(c, action.LockCtx)
            }

            if action.ReturnValues {
                action.ValuesLock.Lock()
                for i, mutation := range mutations {
                    action.Values[string(mutation.Key)] = kv.ReturnedValue{Value: lockResp.Values[i]}
                }
                action.ValuesLock.Unlock()
            }
            return nil
        }
        var locks []*txnlock.Lock
        for _, keyErr := range keyErrs {
            // Check for an already-exists error.
            if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
                e := &tikverr.ErrKeyExist{AlreadyExist: alreadyExist}
                return c.extractKeyExistsErr(e)
            }
            if deadlock := keyErr.Deadlock; deadlock != nil {
                return &tikverr.ErrDeadlock{Deadlock: deadlock}
            }

            // Extract the lock from the key error.
            lock, err1 := txnlock.ExtractLockFromKeyErr(keyErr)
            if err1 != nil {
                return errors.Trace(err1)
            }
            locks = append(locks, lock)
        }
        // Because we already waited on TiKV, there is no need to backoff here.
        // By default TiKV waits 3s (which is also the maximum wait value) when a lock error occurs.
        startTime = time.Now()
        msBeforeTxnExpired, _, err := c.store.GetLockResolver().ResolveLocks(bo, 0, locks)
        if err != nil {
            return errors.Trace(err)
        }
        if action.LockCtx.Stats != nil {
            atomic.AddInt64(&action.LockCtx.Stats.ResolveLockTime, int64(time.Since(startTime)))
        }

        // If msBeforeTxnExpired is not zero, it means there are still locks blocking us from acquiring
        // the pessimistic lock. We should return an acquire-failed error if no-wait is set, or a timeout
        // error if necessary.
        if msBeforeTxnExpired > 0 {
            if action.LockWaitTime() == kv.LockNoWait {
                return tikverr.ErrLockAcquireFailAndNoWaitSet
            } else if action.LockWaitTime() == kv.LockAlwaysWait {
                // do nothing but keep waiting
            } else {
                // lockWaitTime is set, so we should return a wait timeout error if we are still blocked by a lock.
                if time.Since(lockWaitStartTime).Milliseconds() >= action.LockWaitTime() {
                    return errors.Trace(tikverr.ErrLockWaitTimeout)
                }
            }
            if action.LockCtx.PessimisticLockWaited != nil {
                atomic.StoreInt32(action.LockCtx.PessimisticLockWaited, 1)
            }
        }

        // Handle the killed flag when waiting for the pessimistic lock.
        // When a txn runs into LockKeys() and backs off here, it has no chance to call
        // executor.Next() and check the killed flag.
        if action.Killed != nil {
            // Do not reset the killed flag here!
            // actionPessimisticLock runs on each region in parallel, so we have to consider that
            // the error may be dropped.
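            // Descriptive note (added for clarity): only one error from the parallel per-region
            // batches is ultimately propagated to the caller, so the ErrQueryInterrupted returned
            // below may be dropped. Leaving the flag set lets the interruption still be observed
            // by other batches and by the caller.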
            if atomic.LoadUint32(action.Killed) == 1 {
                return errors.Trace(tikverr.ErrQueryInterrupted)
            }
        }
    }
}

func (actionPessimisticRollback) handleSingleBatch(c *twoPhaseCommitter, bo *retry.Backoffer, batch batchMutations) error {
    req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticRollback, &kvrpcpb.PessimisticRollbackRequest{
        StartVersion: c.startTS,
        ForUpdateTs:  c.forUpdateTS,
        Keys:         batch.mutations.GetKeys(),
    })
    req.MaxExecutionDurationMs = uint64(client.MaxWriteExecutionTime.Milliseconds())
    resp, err := c.store.SendReq(bo, req, batch.region, client.ReadTimeoutShort)
    if err != nil {
        return errors.Trace(err)
    }
    regionErr, err := resp.GetRegionError()
    if err != nil {
        return errors.Trace(err)
    }
    if regionErr != nil {
        err = bo.Backoff(retry.BoRegionMiss, errors.New(regionErr.String()))
        if err != nil {
            return errors.Trace(err)
        }
        err = c.pessimisticRollbackMutations(bo, batch.mutations)
        return errors.Trace(err)
    }
    return nil
}

func (c *twoPhaseCommitter) pessimisticLockMutations(bo *retry.Backoffer, lockCtx *kv.LockCtx, mutations CommitterMutations) error {
    if c.sessionID > 0 {
        if val, err := util.EvalFailpoint("beforePessimisticLock"); err == nil {
            // Pass multiple instructions in one string, delimited by commas, to trigger multiple
            // behaviors, e.g. `return("delay,fail")`; they will then be executed in sequence.
            if v, ok := val.(string); ok {
                for _, action := range strings.Split(v, ",") {
                    if action == "delay" {
                        duration := time.Duration(rand.Int63n(int64(time.Second) * 5))
                        logutil.Logger(bo.GetCtx()).Info("[failpoint] injected delay at pessimistic lock",
                            zap.Uint64("txnStartTS", c.startTS), zap.Duration("duration", duration))
                        time.Sleep(duration)
                    } else if action == "fail" {
                        logutil.Logger(bo.GetCtx()).Info("[failpoint] injected failure at pessimistic lock",
                            zap.Uint64("txnStartTS", c.startTS))
                        return errors.New("injected failure at pessimistic lock")
                    }
                }
            }
        }
    }
    return c.doActionOnMutations(bo, actionPessimisticLock{lockCtx}, mutations)
}

func (c *twoPhaseCommitter) pessimisticRollbackMutations(bo *retry.Backoffer, mutations CommitterMutations) error {
    return c.doActionOnMutations(bo, actionPessimisticRollback{}, mutations)
}
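// Illustrative sketch (not part of the upstream file): how a caller typically reaches the
// pessimistic-lock path above. A pessimistic KVTxn calls LockKeys, which builds a kv.LockCtx and
// fans out to actionPessimisticLock.handleSingleBatch for every region batch. The store value,
// key names, and the reuse of the start timestamp as forUpdateTS are assumptions made for
// brevity (in practice forUpdateTS is a fresh timestamp from PD), and the exact LockCtx
// constructor may differ between versions.
//
//	txn, err := store.Begin() // store is assumed to be a *tikv.KVStore
//	if err != nil {
//		return err
//	}
//	txn.SetPessimistic(true)
//	lockCtx := kv.NewLockCtx(txn.StartTS(), kv.LockAlwaysWait, time.Now())
//	// Errors such as ErrLockWaitTimeout, ErrKeyExist, or ErrDeadlock surface from LockKeys.
//	if err := txn.LockKeys(context.Background(), lockCtx, []byte("k1"), []byte("k2")); err != nil {
//		_ = txn.Rollback()
//		return err
//	}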