github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/interlock/point_get.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package interlock

import (
	"context"

	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/failpoint"
	"github.com/whtcorpsinc/milevadb/blockcodec"
	"github.com/whtcorpsinc/milevadb/causet"
	"github.com/whtcorpsinc/milevadb/causet/blocks"
	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb"
	"github.com/whtcorpsinc/milevadb/ekv"
	"github.com/whtcorpsinc/milevadb/memex"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/codec"
	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
	"github.com/whtcorpsinc/milevadb/soliton/rowcodec"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
	"github.com/whtcorpsinc/milevadb/types"
)

func (b *interlockBuilder) buildPointGet(p *causetembedded.PointGetCauset) InterlockingDirectorate {
	if b.ctx.GetStochastikVars().IsPessimisticReadConsistency() {
		if err := b.refreshForUFIDelateTSForRC(); err != nil {
			b.err = err
			return nil
		}
	}
	startTS, err := b.getSnapshotTS()
	if err != nil {
		b.err = err
		return nil
	}
	e := &PointGetInterlockingDirectorate{
		baseInterlockingDirectorate: newBaseInterlockingDirectorate(b.ctx, p.Schema(), p.ID()),
	}
	e.base().initCap = 1
	e.base().maxChunkSize = 1
	if p.Lock {
		b.hasLock = true
	}
	e.Init(p, startTS)
	return e
}

// PointGetInterlockingDirectorate executes a point select query.
type PointGetInterlockingDirectorate struct {
	baseInterlockingDirectorate

	tblInfo          *perceptron.BlockInfo
	handle           ekv.Handle
	idxInfo          *perceptron.IndexInfo
	partInfo         *perceptron.PartitionDefinition
	idxKey           ekv.Key
	handleVal        []byte
	idxVals          []types.Causet
	startTS          uint64
	txn              ekv.Transaction
	snapshot         ekv.Snapshot
	done             bool
	dagger           bool
	lockWaitTime     int64
	rowCausetDecoder *rowcodec.ChunkCausetDecoder

	defCausumns []*perceptron.DeferredCausetInfo
	// virtualDeferredCausetIndex records all the indices of virtual defCausumns, sorted in definition order
	// to make sure we can compute the virtual defCausumns in the right order.
	virtualDeferredCausetIndex []int

	// virtualDeferredCausetRetFieldTypes records the RetFieldTypes of virtual defCausumns.
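	// They are used by FillVirtualDeferredCausetValue in Next to evaluate the virtual defCausumns of the fetched event.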
	virtualDeferredCausetRetFieldTypes []*types.FieldType

	stats *runtimeStatsWithSnapshot
}

// Init sets the fields needed for PointGetInterlockingDirectorate reuse; it does NOT change the baseInterlockingDirectorate field.
func (e *PointGetInterlockingDirectorate) Init(p *causetembedded.PointGetCauset, startTs uint64) {
	causetDecoder := NewEventCausetDecoder(e.ctx, p.Schema(), p.TblInfo)
	e.tblInfo = p.TblInfo
	e.handle = p.Handle
	e.idxInfo = p.IndexInfo
	e.idxVals = p.IndexValues
	e.startTS = startTs
	e.done = false
	e.dagger = p.Lock
	e.lockWaitTime = p.LockWaitTime
	e.rowCausetDecoder = causetDecoder
	e.partInfo = p.PartitionInfo
	e.defCausumns = p.DeferredCausets
	e.buildVirtualDeferredCausetInfo()
}

// buildVirtualDeferredCausetInfo saves the virtual defCausumn indices, sorted in definition order.
func (e *PointGetInterlockingDirectorate) buildVirtualDeferredCausetInfo() {
	e.virtualDeferredCausetIndex = buildVirtualDeferredCausetIndex(e.Schema(), e.defCausumns)
	if len(e.virtualDeferredCausetIndex) > 0 {
		e.virtualDeferredCausetRetFieldTypes = make([]*types.FieldType, len(e.virtualDeferredCausetIndex))
		for i, idx := range e.virtualDeferredCausetIndex {
			e.virtualDeferredCausetRetFieldTypes[i] = e.schemaReplicant.DeferredCausets[idx].RetType
		}
	}
}

// Open implements the InterlockingDirectorate interface.
func (e *PointGetInterlockingDirectorate) Open(context.Context) error {
	txnCtx := e.ctx.GetStochastikVars().TxnCtx
	snapshotTS := e.startTS
	if e.dagger {
		snapshotTS = txnCtx.GetForUFIDelateTS()
	}
	var err error
	e.txn, err = e.ctx.Txn(false)
	if err != nil {
		return err
	}
	if e.txn.Valid() && txnCtx.StartTS == txnCtx.GetForUFIDelateTS() {
		e.snapshot = e.txn.GetSnapshot()
	} else {
		e.snapshot, err = e.ctx.GetStore().GetSnapshot(ekv.Version{Ver: snapshotTS})
		if err != nil {
			return err
		}
	}
	if e.runtimeStats != nil {
		snapshotStats := &einsteindb.SnapshotRuntimeStats{}
		e.stats = &runtimeStatsWithSnapshot{
			SnapshotRuntimeStats: snapshotStats,
		}
		e.snapshot.SetOption(ekv.DefCauslectRuntimeStats, snapshotStats)
		e.ctx.GetStochastikVars().StmtCtx.RuntimeStatsDefCausl.RegisterStats(e.id, e.stats)
	}
	if e.ctx.GetStochastikVars().GetReplicaRead().IsFollowerRead() {
		e.snapshot.SetOption(ekv.ReplicaRead, ekv.ReplicaReadFollower)
	}
	e.snapshot.SetOption(ekv.TaskID, e.ctx.GetStochastikVars().StmtCtx.TaskID)
	return nil
}

// Close implements the InterlockingDirectorate interface.
func (e *PointGetInterlockingDirectorate) Close() error {
	if e.runtimeStats != nil && e.snapshot != nil {
		e.snapshot.DelOption(ekv.DefCauslectRuntimeStats)
	}
	e.done = false
	return nil
}

// Next implements the InterlockingDirectorate interface.
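// It resolves the event handle from the unique index key when needed, reads the event by its key,
// decodes the event value into req, and fills the virtual defCausumn values. At most one event is returned.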
func (e *PointGetInterlockingDirectorate) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if e.done {
		return nil
	}
	e.done = true

	var tblID int64
	var err error
	if e.partInfo != nil {
		tblID = e.partInfo.ID
	} else {
		tblID = e.tblInfo.ID
	}
	if e.idxInfo != nil {
		if isCommonHandleRead(e.tblInfo, e.idxInfo) {
			handleBytes, err := EncodeUniqueIndexValuesForKey(e.ctx, e.tblInfo, e.idxInfo, e.idxVals)
			if err != nil {
				return err
			}
			e.handle, err = ekv.NewCommonHandle(handleBytes)
			if err != nil {
				return err
			}
		} else {
			e.idxKey, err = EncodeUniqueIndexKey(e.ctx, e.tblInfo, e.idxInfo, e.idxVals, tblID)
			if err != nil && !ekv.ErrNotExist.Equal(err) {
				return err
			}

			e.handleVal, err = e.get(ctx, e.idxKey)
			if err != nil {
				if !ekv.ErrNotExist.Equal(err) {
					return err
				}
			}
			if len(e.handleVal) == 0 {
				// The handle is not found; try to dagger the index key if the isolation level is not read consistency.
				if e.ctx.GetStochastikVars().IsPessimisticReadConsistency() {
					return nil
				}
				return e.lockKeyIfNeeded(ctx, e.idxKey)
			}
			var iv ekv.Handle
			iv, err = blockcodec.DecodeHandleInUniqueIndexValue(e.handleVal, e.tblInfo.IsCommonHandle)
			if err != nil {
				return err
			}
			e.handle = iv

			// The injection is used to simulate the following scenario:
			// 1. Stochastik A creates a point get query but pauses before the second `GET` of ekv from the backend.
			// 2. Stochastik B creates an UFIDelATE query to uFIDelate the record that will be obtained in step 1.
			// 3. The point get then retrieves the data from the backend after step 2 has finished.
			// 4. Check the result.
			failpoint.InjectContext(ctx, "pointGetRepeablockReadTest-step1", func() {
				if ch, ok := ctx.Value("pointGetRepeablockReadTest").(chan struct{}); ok {
					// Make `UFIDelATE` continue.
					close(ch)
				}
				// Wait until `UFIDelATE` has finished.
				failpoint.InjectContext(ctx, "pointGetRepeablockReadTest-step2", nil)
			})
		}
	}

	key := blockcodec.EncodeEventKeyWithHandle(tblID, e.handle)
	val, err := e.getAndLock(ctx, key)
	if err != nil {
		return err
	}
	if len(val) == 0 {
		if e.idxInfo != nil && !isCommonHandleRead(e.tblInfo, e.idxInfo) {
			return ekv.ErrNotExist.GenWithStack("inconsistent extra index %s, handle %d not found in causet",
				e.idxInfo.Name.O, e.handle)
		}
		return nil
	}
	err = DecodeEventValToChunk(e.base().ctx, e.schemaReplicant, e.tblInfo, e.handle, val, req, e.rowCausetDecoder)
	if err != nil {
		return err
	}

	err = FillVirtualDeferredCausetValue(e.virtualDeferredCausetRetFieldTypes, e.virtualDeferredCausetIndex,
		e.schemaReplicant, e.defCausumns, e.ctx, req)
	if err != nil {
		return err
	}
	return nil
}

func (e *PointGetInterlockingDirectorate) getAndLock(ctx context.Context, key ekv.Key) (val []byte, err error) {
	if e.ctx.GetStochastikVars().IsPessimisticReadConsistency() {
		// Only lock the keys that exist in RC isolation.
		val, err = e.get(ctx, key)
		if err != nil {
			if !ekv.ErrNotExist.Equal(err) {
				return nil, err
			}
			return nil, nil
		}
		err = e.lockKeyIfNeeded(ctx, key)
		if err != nil {
			return nil, err
		}
		return val, nil
	}
	// Lock the key before the get in RR isolation, then the get will read the value from the cache.
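	// lockKeyIfNeeded requests the locked values to be returned and stores them into the pessimistic
	// dagger cache, so the following e.get can usually be served from the cache instead of another read.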
	err = e.lockKeyIfNeeded(ctx, key)
	if err != nil {
		return nil, err
	}
	val, err = e.get(ctx, key)
	if err != nil {
		if !ekv.ErrNotExist.Equal(err) {
			return nil, err
		}
		return nil, nil
	}
	return val, nil
}

func (e *PointGetInterlockingDirectorate) lockKeyIfNeeded(ctx context.Context, key []byte) error {
	if e.dagger {
		seVars := e.ctx.GetStochastikVars()
		lockCtx := newLockCtx(seVars, e.lockWaitTime)
		lockCtx.ReturnValues = true
		lockCtx.Values = map[string]ekv.ReturnedValue{}
		err := doLockKeys(ctx, e.ctx, lockCtx, key)
		if err != nil {
			return err
		}
		lockCtx.ValuesLock.Lock()
		defer lockCtx.ValuesLock.Unlock()
		for key, val := range lockCtx.Values {
			if !val.AlreadyLocked {
				seVars.TxnCtx.SetPessimisticLockCache(ekv.Key(key), val.Value)
			}
		}
		if len(e.handleVal) > 0 {
			seVars.TxnCtx.SetPessimisticLockCache(e.idxKey, e.handleVal)
		}
	}
	return nil
}

// get will first try to get from the txn buffer, then check the pessimistic dagger cache,
// and finally the causetstore. ekv.ErrNotExist will be returned if the key is not found.
func (e *PointGetInterlockingDirectorate) get(ctx context.Context, key ekv.Key) ([]byte, error) {
	if len(key) == 0 {
		return nil, ekv.ErrNotExist
	}
	if e.txn.Valid() && !e.txn.IsReadOnly() {
		// We cannot use txn.Get directly here because the snapshot in the txn and e.snapshot may be
		// different for a pessimistic transaction.
		val, err := e.txn.GetMemBuffer().Get(ctx, key)
		if err == nil {
			return val, err
		}
		if !ekv.IsErrNotFound(err) {
			return nil, err
		}
		// The key does not exist in the mem buffer; check the dagger cache.
		var ok bool
		val, ok = e.ctx.GetStochastikVars().TxnCtx.GetKeyInPessimisticLockCache(key)
		if ok {
			return val, nil
		}
		// Fall through to the snapshot get.
	}
	return e.snapshot.Get(ctx, key)
}

// EncodeUniqueIndexKey encodes a unique index key.
func EncodeUniqueIndexKey(ctx stochastikctx.Context, tblInfo *perceptron.BlockInfo, idxInfo *perceptron.IndexInfo, idxVals []types.Causet, tID int64) (_ []byte, err error) {
	encodedIdxVals, err := EncodeUniqueIndexValuesForKey(ctx, tblInfo, idxInfo, idxVals)
	if err != nil {
		return nil, err
	}
	return blockcodec.EncodeIndexSeekKey(tID, idxInfo.ID, encodedIdxVals), nil
}

// EncodeUniqueIndexValuesForKey encodes unique index values for a key.
func EncodeUniqueIndexValuesForKey(ctx stochastikctx.Context, tblInfo *perceptron.BlockInfo, idxInfo *perceptron.IndexInfo, idxVals []types.Causet) (_ []byte, err error) {
	sc := ctx.GetStochastikVars().StmtCtx
	for i := range idxVals {
		defCausInfo := tblInfo.DeferredCausets[idxInfo.DeferredCausets[i].Offset]
		// causet.CastValue will append 0x0 if the string value's length is smaller than the BINARY defCausumn's length.
		// So we don't use CastValue for string values for now.
		// TODO: merge the two if branches.
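		// For string-typed defCausumns the causet is only converted to a string with the defCausumn's defCauslation;
		// other types go through causet.CastValue, and an overflow means the key cannot exist, so ekv.ErrNotExist is returned.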
		if defCausInfo.Tp == allegrosql.TypeString || defCausInfo.Tp == allegrosql.TypeVarString || defCausInfo.Tp == allegrosql.TypeVarchar {
			var str string
			str, err = idxVals[i].ToString()
			idxVals[i].SetString(str, defCausInfo.FieldType.DefCauslate)
		} else {
			idxVals[i], err = causet.CastValue(ctx, idxVals[i], defCausInfo, true, false)
			if types.ErrOverflow.Equal(err) {
				return nil, ekv.ErrNotExist
			}
		}
		if err != nil {
			return nil, err
		}
	}

	encodedIdxVals, err := codec.EncodeKey(sc, nil, idxVals...)
	if err != nil {
		return nil, err
	}
	return encodedIdxVals, nil
}

// DecodeEventValToChunk decodes an event value into the chunk, checking which event format is used.
func DecodeEventValToChunk(sctx stochastikctx.Context, schemaReplicant *memex.Schema, tblInfo *perceptron.BlockInfo,
	handle ekv.Handle, rowVal []byte, chk *chunk.Chunk, rd *rowcodec.ChunkCausetDecoder) error {
	if rowcodec.IsNewFormat(rowVal) {
		return rd.DecodeToChunk(rowVal, handle, chk)
	}
	return decodeOldEventValToChunk(sctx, schemaReplicant, tblInfo, handle, rowVal, chk)
}

func decodeOldEventValToChunk(sctx stochastikctx.Context, schemaReplicant *memex.Schema, tblInfo *perceptron.BlockInfo, handle ekv.Handle,
	rowVal []byte, chk *chunk.Chunk) error {
	pkDefCauss := blocks.TryGetCommonPkDeferredCausetIds(tblInfo)
	defCausID2CutPos := make(map[int64]int, schemaReplicant.Len())
	for _, defCaus := range schemaReplicant.DeferredCausets {
		if _, ok := defCausID2CutPos[defCaus.ID]; !ok {
			defCausID2CutPos[defCaus.ID] = len(defCausID2CutPos)
		}
	}
	cutVals, err := blockcodec.CutEventNew(rowVal, defCausID2CutPos)
	if err != nil {
		return err
	}
	if cutVals == nil {
		cutVals = make([][]byte, len(defCausID2CutPos))
	}
	causetDecoder := codec.NewCausetDecoder(chk, sctx.GetStochastikVars().Location())
	for i, defCaus := range schemaReplicant.DeferredCausets {
		// The virtual defCausumn value is filled after the event calculation.
		if defCaus.VirtualExpr != nil {
			chk.AppendNull(i)
			continue
		}
		ok, err := tryDecodeFromHandle(tblInfo, i, defCaus, handle, chk, causetDecoder, pkDefCauss)
		if err != nil {
			return err
		}
		if ok {
			continue
		}
		cutPos := defCausID2CutPos[defCaus.ID]
		if len(cutVals[cutPos]) == 0 {
			defCausInfo := getDefCausInfoByID(tblInfo, defCaus.ID)
			d, err1 := causet.GetDefCausOriginDefaultValue(sctx, defCausInfo)
			if err1 != nil {
				return err1
			}
			chk.AppendCauset(i, &d)
			continue
		}
		_, err = causetDecoder.DecodeOne(cutVals[cutPos], i, defCaus.RetType)
		if err != nil {
			return err
		}
	}
	return nil
}

func tryDecodeFromHandle(tblInfo *perceptron.BlockInfo, i int, defCaus *memex.DeferredCauset, handle ekv.Handle, chk *chunk.Chunk, causetDecoder *codec.CausetDecoder, pkDefCauss []int64) (bool, error) {
	if tblInfo.PKIsHandle && allegrosql.HasPriKeyFlag(defCaus.RetType.Flag) {
		chk.AppendInt64(i, handle.IntValue())
		return true, nil
	}
	if defCaus.ID == perceptron.ExtraHandleID {
		chk.AppendInt64(i, handle.IntValue())
		return true, nil
	}
	// Try to decode the common handle.
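	// For a clustered (common handle) primary key, the key defCausumn values are encoded inside the handle,
	// so they are decoded from handle.EncodedDefCaus instead of from the event value.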
	if allegrosql.HasPriKeyFlag(defCaus.RetType.Flag) {
		for i, hid := range pkDefCauss {
			if defCaus.ID == hid {
				_, err := causetDecoder.DecodeOne(handle.EncodedDefCaus(i), i, defCaus.RetType)
				if err != nil {
					return false, errors.Trace(err)
				}
				return true, nil
			}
		}
	}
	return false, nil
}

func getDefCausInfoByID(tbl *perceptron.BlockInfo, defCausID int64) *perceptron.DeferredCausetInfo {
	for _, defCaus := range tbl.DeferredCausets {
		if defCaus.ID == defCausID {
			return defCaus
		}
	}
	return nil
}

type runtimeStatsWithSnapshot struct {
	*einsteindb.SnapshotRuntimeStats
}

func (e *runtimeStatsWithSnapshot) String() string {
	if e.SnapshotRuntimeStats != nil {
		return e.SnapshotRuntimeStats.String()
	}
	return ""
}

// Clone implements the RuntimeStats interface.
func (e *runtimeStatsWithSnapshot) Clone() execdetails.RuntimeStats {
	newRs := &runtimeStatsWithSnapshot{}
	if e.SnapshotRuntimeStats != nil {
		snapshotStats := e.SnapshotRuntimeStats.Clone()
		newRs.SnapshotRuntimeStats = snapshotStats.(*einsteindb.SnapshotRuntimeStats)
	}
	return newRs
}

// Merge implements the RuntimeStats interface.
func (e *runtimeStatsWithSnapshot) Merge(other execdetails.RuntimeStats) {
	tmp, ok := other.(*runtimeStatsWithSnapshot)
	if !ok {
		return
	}
	if tmp.SnapshotRuntimeStats != nil {
		if e.SnapshotRuntimeStats == nil {
			snapshotStats := tmp.SnapshotRuntimeStats.Clone()
			e.SnapshotRuntimeStats = snapshotStats.(*einsteindb.SnapshotRuntimeStats)
			return
		}
		e.SnapshotRuntimeStats.Merge(tmp.SnapshotRuntimeStats)
	}
}

// Tp implements the RuntimeStats interface.
func (e *runtimeStatsWithSnapshot) Tp() int {
	return execdetails.TpRuntimeStatsWithSnapshot
}