github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/causetstore/stochastikctx/variable/session.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package variable

import (
	"bytes"
	"crypto/tls"
	"encoding/binary"
	"fmt"
	"math"
	"math/rand"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/klauspost/cpuid"
	"github.com/twmb/murmur3"
	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
	"github.com/whtcorpsinc/BerolinaSQL/ast"
	"github.com/whtcorpsinc/BerolinaSQL/auth"
	"github.com/whtcorpsinc/BerolinaSQL/charset"
	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/errors"
	pumpcli "github.com/whtcorpsinc/milevadb-tools/milevadb-binlog/pump_client"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb/oracle"
	"github.com/whtcorpsinc/milevadb/config"
	"github.com/whtcorpsinc/milevadb/ekv"
	"github.com/whtcorpsinc/milevadb/metrics"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/defCauslate"
	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	"github.com/whtcorpsinc/milevadb/soliton/rowcodec"
	"github.com/whtcorpsinc/milevadb/soliton/storeutil"
	"github.com/whtcorpsinc/milevadb/soliton/stringutil"
	"github.com/whtcorpsinc/milevadb/soliton/timeutil"
	"github.com/whtcorpsinc/milevadb/spacetime/autoid"
	"github.com/whtcorpsinc/milevadb/stochastikctx/stmtctx"
	"github.com/whtcorpsinc/milevadb/types"
)

var preparedStmtCount int64

// RetryInfo saves retry information.
type RetryInfo struct {
	Retrying               bool
	DroppedPreparedStmtIDs []uint32
	autoIncrementIDs       retryInfoAutoIDs
	autoRandomIDs          retryInfoAutoIDs
}

// Clean does some clean work.
func (r *RetryInfo) Clean() {
	r.autoIncrementIDs.clean()
	r.autoRandomIDs.clean()

	if len(r.DroppedPreparedStmtIDs) > 0 {
		r.DroppedPreparedStmtIDs = r.DroppedPreparedStmtIDs[:0]
	}
}

// ResetOffset resets the current retry offset.
func (r *RetryInfo) ResetOffset() {
	r.autoIncrementIDs.resetOffset()
	r.autoRandomIDs.resetOffset()
}

// AddAutoIncrementID adds id to autoIncrementIDs.
func (r *RetryInfo) AddAutoIncrementID(id int64) {
	r.autoIncrementIDs.autoIDs = append(r.autoIncrementIDs.autoIDs, id)
}

// GetCurrAutoIncrementID gets current autoIncrementID.
func (r *RetryInfo) GetCurrAutoIncrementID() (int64, error) {
	return r.autoIncrementIDs.getCurrent()
}

// AddAutoRandomID adds id to autoRandomIDs.
func (r *RetryInfo) AddAutoRandomID(id int64) {
	r.autoRandomIDs.autoIDs = append(r.autoRandomIDs.autoIDs, id)
}

// GetCurrAutoRandomID gets current AutoRandomID.
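// It mirrors GetCurrAutoIncrementID: during a transaction retry the IDs recorded in the
// first execution are replayed in order instead of allocating new ones.
//
// A minimal usage sketch (not part of the original file; error handling elided):
//
//	info := &RetryInfo{}
//	info.AddAutoRandomID(7)             // recorded while the memex first executes
//	info.ResetOffset()                  // rewind before the retry
//	id, _ := info.GetCurrAutoRandomID() // replays 7 during the retry
//	_ = id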
func (r *RetryInfo) GetCurrAutoRandomID() (int64, error) {
	return r.autoRandomIDs.getCurrent()
}

type retryInfoAutoIDs struct {
	currentOffset int
	autoIDs       []int64
}

func (r *retryInfoAutoIDs) resetOffset() {
	r.currentOffset = 0
}

func (r *retryInfoAutoIDs) clean() {
	r.currentOffset = 0
	if len(r.autoIDs) > 0 {
		r.autoIDs = r.autoIDs[:0]
	}
}

func (r *retryInfoAutoIDs) getCurrent() (int64, error) {
	if r.currentOffset >= len(r.autoIDs) {
		return 0, errCantGetValidID
	}
	id := r.autoIDs[r.currentOffset]
	r.currentOffset++
	return id, nil
}

// stmtFuture is used to asynchronously get the timestamp for a memex.
type stmtFuture struct {
	future   oracle.Future
	cachedTS uint64
}

// TransactionContext is used to causetstore variables that have transaction scope.
type TransactionContext struct {
	forUFIDelateTS  uint64
	stmtFuture      oracle.Future
	Binlog          interface{}
	SchemaReplicant interface{}
	History         interface{}
	SchemaVersion   int64
	StartTS         uint64

	// ShardStep indicates the max size of continuous rowid shard in one transaction.
	ShardStep    int
	shardRemain  int
	currentShard int64
	shardRand    *rand.Rand

	// BlockDeltaMap is used in the schemaReplicant validator for DBS changes in one causet not to causet others.
	// It's also used in the statistics uFIDelating.
	// Note: for the partitioned causet, it stores all the partition IDs.
	BlockDeltaMap map[int64]BlockDelta

	// unchangedRowKeys is used to causetstore the unchanged rows that need a dagger for pessimistic transactions.
	unchangedRowKeys map[string]struct{}

	// pessimisticLockCache is the cache for pessimistic locked keys.
	// The value never changes during the transaction.
	pessimisticLockCache map[string][]byte
	PessimisticCacheHit  int

	// CreateTime For metrics.
	CreateTime     time.Time
	StatementCount int
	CouldRetry     bool
	IsPessimistic  bool
	Isolation      string
	LockExpire     uint32
	ForUFIDelate   uint32
}

// GetShard returns the shard prefix for the next `count` rowids.
func (tc *TransactionContext) GetShard(shardRowIDBits uint64, typeBitsLength uint64, reserveSignBit bool, count int) int64 {
	if shardRowIDBits == 0 {
		return 0
	}
	if tc.shardRand == nil {
		tc.shardRand = rand.New(rand.NewSource(int64(tc.StartTS)))
	}
	if tc.shardRemain <= 0 {
		tc.uFIDelateShard()
		tc.shardRemain = tc.ShardStep
	}
	tc.shardRemain -= count

	var signBitLength uint64
	if reserveSignBit {
		signBitLength = 1
	}
	return (tc.currentShard & (1<<shardRowIDBits - 1)) << (typeBitsLength - shardRowIDBits - signBitLength)
}

func (tc *TransactionContext) uFIDelateShard() {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], tc.shardRand.Uint64())
	tc.currentShard = int64(murmur3.Sum32(buf[:]))
}

// AddUnchangedRowKey adds an unchanged event key of an uFIDelate memex for a pessimistic dagger.
func (tc *TransactionContext) AddUnchangedRowKey(key []byte) {
	if tc.unchangedRowKeys == nil {
		tc.unchangedRowKeys = map[string]struct{}{}
	}
	tc.unchangedRowKeys[string(key)] = struct{}{}
}

// DefCauslectUnchangedRowKeys defCauslects unchanged event keys for a pessimistic dagger.
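// The keys are accumulated by AddUnchangedRowKey while the memex executes and are drained
// here when the dagger request is assembled; the internal map is reset to nil afterwards.
//
// A minimal usage sketch (not part of the original file; rowKey is a hypothetical encoded
// event key and the surrounding transaction handling is elided):
//
//	tc.AddUnchangedRowKey(rowKey) // during memex execution
//	keys := tc.DefCauslectUnchangedRowKeys(make([]ekv.Key, 0, 4))
//	// keys now contains every unchanged event key that still needs a dagger.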
208 func (tc *TransactionContext) DefCauslectUnchangedRowKeys(buf []ekv.Key) []ekv.Key { 209 for key := range tc.unchangedRowKeys { 210 buf = append(buf, ekv.Key(key)) 211 } 212 tc.unchangedRowKeys = nil 213 return buf 214 } 215 216 // UFIDelateDeltaForBlock uFIDelates the delta info for some causet. 217 func (tc *TransactionContext) UFIDelateDeltaForBlock(physicalBlockID int64, delta int64, count int64, defCausSize map[int64]int64) { 218 if tc.BlockDeltaMap == nil { 219 tc.BlockDeltaMap = make(map[int64]BlockDelta) 220 } 221 item := tc.BlockDeltaMap[physicalBlockID] 222 if item.DefCausSize == nil && defCausSize != nil { 223 item.DefCausSize = make(map[int64]int64, len(defCausSize)) 224 } 225 item.Delta += delta 226 item.Count += count 227 for key, val := range defCausSize { 228 item.DefCausSize[key] += val 229 } 230 tc.BlockDeltaMap[physicalBlockID] = item 231 } 232 233 // GetKeyInPessimisticLockCache gets a key in pessimistic dagger cache. 234 func (tc *TransactionContext) GetKeyInPessimisticLockCache(key ekv.Key) (val []byte, ok bool) { 235 if tc.pessimisticLockCache == nil { 236 return nil, false 237 } 238 val, ok = tc.pessimisticLockCache[string(key)] 239 if ok { 240 tc.PessimisticCacheHit++ 241 } 242 return 243 } 244 245 // SetPessimisticLockCache sets a key value pair into pessimistic dagger cache. 246 func (tc *TransactionContext) SetPessimisticLockCache(key ekv.Key, val []byte) { 247 if tc.pessimisticLockCache == nil { 248 tc.pessimisticLockCache = map[string][]byte{} 249 } 250 tc.pessimisticLockCache[string(key)] = val 251 } 252 253 // Cleanup clears up transaction info that no longer use. 254 func (tc *TransactionContext) Cleanup() { 255 // tc.SchemaReplicant = nil; we cannot do it now, because some operation like handleFieldList depend on this. 256 tc.Binlog = nil 257 tc.History = nil 258 tc.BlockDeltaMap = nil 259 tc.pessimisticLockCache = nil 260 } 261 262 // ClearDelta clears the delta map. 263 func (tc *TransactionContext) ClearDelta() { 264 tc.BlockDeltaMap = nil 265 } 266 267 // GetForUFIDelateTS returns the ts for uFIDelate. 268 func (tc *TransactionContext) GetForUFIDelateTS() uint64 { 269 if tc.forUFIDelateTS > tc.StartTS { 270 return tc.forUFIDelateTS 271 } 272 return tc.StartTS 273 } 274 275 // SetForUFIDelateTS sets the ts for uFIDelate. 276 func (tc *TransactionContext) SetForUFIDelateTS(forUFIDelateTS uint64) { 277 if forUFIDelateTS > tc.forUFIDelateTS { 278 tc.forUFIDelateTS = forUFIDelateTS 279 } 280 } 281 282 // SetStmtFutureForRC sets the stmtFuture . 283 func (tc *TransactionContext) SetStmtFutureForRC(future oracle.Future) { 284 tc.stmtFuture = future 285 } 286 287 // GetStmtFutureForRC gets the stmtFuture. 288 func (tc *TransactionContext) GetStmtFutureForRC() oracle.Future { 289 return tc.stmtFuture 290 } 291 292 // WriteStmtBufs can be used by insert/replace/delete/uFIDelate memex. 293 // TODO: use a common memory pool to replace this. 294 type WriteStmtBufs struct { 295 // RowValBuf is used by blockcodec.EncodeRow, to reduce runtime.growslice. 296 RowValBuf []byte 297 // AddRowValues use to causetstore temp insert rows value, to reduce memory allocations when importing data. 
298 AddRowValues []types.Causet 299 300 // IndexValsBuf is used by index.FetchValues 301 IndexValsBuf []types.Causet 302 // IndexKeyBuf is used by index.GenIndexKey 303 IndexKeyBuf []byte 304 } 305 306 func (ib *WriteStmtBufs) clean() { 307 ib.RowValBuf = nil 308 ib.AddRowValues = nil 309 ib.IndexValsBuf = nil 310 ib.IndexKeyBuf = nil 311 } 312 313 // BlockSnapshot represents a data snapshot of the causet contained in `information_schema`. 314 type BlockSnapshot struct { 315 Rows [][]types.Causet 316 Err error 317 } 318 319 type txnIsolationLevelOneShotState uint 320 321 // RewritePhaseInfo records some information about the rewrite phase 322 type RewritePhaseInfo struct { 323 // DurationRewrite is the duration of rewriting the ALLEGROALLEGROSQL. 324 DurationRewrite time.Duration 325 326 // DurationPreprocessSubQuery is the duration of pre-processing sub-queries. 327 DurationPreprocessSubQuery time.Duration 328 329 // PreprocessSubQueries is the number of pre-processed sub-queries. 330 PreprocessSubQueries int 331 } 332 333 // Reset resets all fields in RewritePhaseInfo. 334 func (r *RewritePhaseInfo) Reset() { 335 r.DurationRewrite = 0 336 r.DurationPreprocessSubQuery = 0 337 r.PreprocessSubQueries = 0 338 } 339 340 const ( 341 // oneShotDef means default, that is tx_isolation_one_shot not set. 342 oneShotDef txnIsolationLevelOneShotState = iota 343 // oneShotSet means it's set in current transaction. 344 oneShotSet 345 // onsShotUse means it should be used in current transaction. 346 oneShotUse 347 ) 348 349 // StochastikVars is to handle user-defined or global variables in the current stochastik. 350 type StochastikVars struct { 351 Concurrency 352 MemQuota 353 BatchSize 354 // DMLBatchSize indicates the number of rows batch-committed for a memex. 355 // It will be used when using LOAD DATA or BatchInsert or BatchDelete is on. 356 DMLBatchSize int 357 RetryLimit int64 358 DisableTxnAutoRetry bool 359 // UsersLock is a dagger for user defined variables. 360 UsersLock sync.RWMutex 361 // Users are user defined variables. 362 Users map[string]types.Causet 363 // systems variables, don't modify it directly, use GetSystemVar/SetSystemVar method. 364 systems map[string]string 365 // SysWarningCount is the system variable "warning_count", because it is on the hot path, so we extract it from the systems 366 SysWarningCount int 367 // SysErrorCount is the system variable "error_count", because it is on the hot path, so we extract it from the systems 368 SysErrorCount uint16 369 // PreparedStmts stores prepared memex. 370 PreparedStmts map[uint32]interface{} 371 PreparedStmtNameToID map[string]uint32 372 // preparedStmtID is id of prepared memex. 373 preparedStmtID uint32 374 // PreparedParams params for prepared memexs 375 PreparedParams PreparedParams 376 377 // ActiveRoles stores active roles for current user 378 ActiveRoles []*auth.RoleIdentity 379 380 RetryInfo *RetryInfo 381 // TxnCtx Should be reset on transaction finished. 382 TxnCtx *TransactionContext 383 384 // KVVars is the variables for KV storage. 385 KVVars *ekv.Variables 386 387 // txnIsolationLevelOneShot is used to implements "set transaction isolation level ..." 388 txnIsolationLevelOneShot struct { 389 state txnIsolationLevelOneShotState 390 value string 391 } 392 393 // Status stands for the stochastik status. e.g. in transaction or not, auto commit is on or off, and so on. 394 Status uint16 395 396 // ClientCapability is client's capability. 
397 ClientCapability uint32 398 399 // TLSConnectionState is the TLS connection state (nil if not using TLS). 400 TLSConnectionState *tls.ConnectionState 401 402 // ConnectionID is the connection id of the current stochastik. 403 ConnectionID uint64 404 405 // CausetID is the unique id of logical and physical plan. 406 CausetID int 407 408 // CausetDeferredCausetID is the unique id for defCausumn when building plan. 409 CausetDeferredCausetID int64 410 411 // User is the user identity with which the stochastik login. 412 User *auth.UserIdentity 413 414 // CurrentDB is the default database of this stochastik. 415 CurrentDB string 416 417 // CurrentDBChanged indicates if the CurrentDB has been uFIDelated, and if it is we should print it into 418 // the slow log to make it be compatible with MyALLEGROSQL, https://github.com/whtcorpsinc/milevadb/issues/17846. 419 CurrentDBChanged bool 420 421 // StrictALLEGROSQLMode indicates if the stochastik is in strict mode. 422 StrictALLEGROSQLMode bool 423 424 // CommonGlobalLoaded indicates if common global variable has been loaded for this stochastik. 425 CommonGlobalLoaded bool 426 427 // InRestrictedALLEGROSQL indicates if the stochastik is handling restricted ALLEGROALLEGROSQL execution. 428 InRestrictedALLEGROSQL bool 429 430 // SnapshotTS is used for reading history data. For simplicity, SnapshotTS only supports allegrosql request. 431 SnapshotTS uint64 432 433 // SnapshotschemaReplicant is used with SnapshotTS, when the schemaReplicant version at snapshotTS less than current schemaReplicant 434 // version, we load an old version schemaReplicant for query. 435 SnapshotschemaReplicant interface{} 436 437 // BinlogClient is used to write binlog. 438 BinlogClient *pumpcli.PumpsClient 439 440 // GlobalVarsAccessor is used to set and get global variables. 441 GlobalVarsAccessor GlobalVarAccessor 442 443 // LastFoundRows is the number of found rows of last query memex 444 LastFoundRows uint64 445 446 // StmtCtx holds variables for current executing memex. 447 StmtCtx *stmtctx.StatementContext 448 449 // AllowAggPushDown can be set to false to forbid aggregation push down. 450 AllowAggPushDown bool 451 452 // AllowBCJ means allow broadcast join. 453 AllowBCJ bool 454 // AllowDistinctAggPushDown can be set true to allow agg with distinct push down to einsteindb/tiflash. 455 AllowDistinctAggPushDown bool 456 457 // AllowWriteRowID can be set to false to forbid write data to _milevadb_rowid. 458 // This variable is currently not recommended to be turned on. 459 AllowWriteRowID bool 460 461 // AllowBatchCop means if we should send batch interlock to TiFlash. Default value is 1, means to use batch cop in case of aggregation and join. 462 // If value is set to 2 , which means to force to send batch cop for any query. Value is set to 0 means never use batch cop. 463 AllowBatchCop int 464 465 // MilevaDBAllowAutoRandExplicitInsert indicates whether explicit insertion on auto_random defCausumn is allowed. 466 AllowAutoRandExplicitInsert bool 467 468 // CorrelationThreshold is the guard to enable event count estimation using defCausumn order correlation. 469 CorrelationThreshold float64 470 471 // CorrelationExpFactor is used to control the heuristic approach of event count estimation when CorrelationThreshold is not met. 472 CorrelationExpFactor int 473 474 // CPUFactor is the CPU cost of processing one memex for one event. 475 CPUFactor float64 476 // CopCPUFactor is the CPU cost of processing one memex for one event in interlock. 
477 CopCPUFactor float64 478 // CopTiFlashConcurrencyFactor is the concurrency number of computation in tiflash interlock. 479 CopTiFlashConcurrencyFactor float64 480 // NetworkFactor is the network cost of transferring 1 byte data. 481 NetworkFactor float64 482 // ScanFactor is the IO cost of scanning 1 byte data on EinsteinDB and TiFlash. 483 ScanFactor float64 484 // DescScanFactor is the IO cost of scanning 1 byte data on EinsteinDB and TiFlash in desc order. 485 DescScanFactor float64 486 // SeekFactor is the IO cost of seeking the start value of a range in EinsteinDB or TiFlash. 487 SeekFactor float64 488 // MemoryFactor is the memory cost of storing one tuple. 489 MemoryFactor float64 490 // DiskFactor is the IO cost of reading/writing one byte to temporary disk. 491 DiskFactor float64 492 // ConcurrencyFactor is the CPU cost of additional one goroutine. 493 ConcurrencyFactor float64 494 495 // CurrInsertValues is used to record current ValuesExpr's values. 496 // See http://dev.allegrosql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values 497 CurrInsertValues chunk.Row 498 499 // Per-connection time zones. Each client that connects has its own time zone setting, given by the stochastik time_zone variable. 500 // See https://dev.allegrosql.com/doc/refman/5.7/en/time-zone-support.html 501 TimeZone *time.Location 502 503 ALLEGROSQLMode allegrosql.ALLEGROSQLMode 504 505 // AutoIncrementIncrement and AutoIncrementOffset indicates the autoID's start value and increment. 506 AutoIncrementIncrement int 507 508 AutoIncrementOffset int 509 510 /* MilevaDB system variables */ 511 512 // SkipASCIICheck check on input value. 513 SkipASCIICheck bool 514 515 // SkipUTF8Check check on input value. 516 SkipUTF8Check bool 517 518 // BatchInsert indicates if we should split insert data into multiple batches. 519 BatchInsert bool 520 521 // BatchDelete indicates if we should split delete data into multiple batches. 522 BatchDelete bool 523 524 // BatchCommit indicates if we should split the transaction into multiple batches. 525 BatchCommit bool 526 527 // IDSlabPredictor is provided by ekvCausetEncoder, if it is provided, we will use it to alloc auto id instead of using 528 // Block.alloc. 529 IDSlabPredictor autoid.SlabPredictor 530 531 // OptimizerSelectivityLevel defines the level of the selectivity estimation in plan. 532 OptimizerSelectivityLevel int 533 534 // EnableBlockPartition enables causet partition feature. 535 EnableBlockPartition string 536 537 // EnableCascadesCausetAppend enables the cascades causet. 538 EnableCascadesCausetAppend bool 539 540 // EnableWindowFunction enables the window function. 541 EnableWindowFunction bool 542 543 // EnableVectorizedExpression enables the vectorized memex evaluation. 544 EnableVectorizedExpression bool 545 546 // DBSReorgPriority is the operation priority of adding indices. 547 DBSReorgPriority int 548 549 // EnableChangeDeferredCausetType is used to control whether to enable the change defCausumn type. 550 EnableChangeDeferredCausetType bool 551 552 // WaitSplitRegionFinish defines the split region behaviour is sync or async. 553 WaitSplitRegionFinish bool 554 555 // WaitSplitRegionTimeout defines the split region timeout. 556 WaitSplitRegionTimeout uint64 557 558 // EnableStreaming indicates whether the interlock request can use streaming API. 559 // TODO: remove this after milevadb-server configuration "enable-streaming' removed. 
560 EnableStreaming bool 561 562 // EnableChunkRPC indicates whether the interlock request can use chunk API. 563 EnableChunkRPC bool 564 565 writeStmtBufs WriteStmtBufs 566 567 // L2CacheSize indicates the size of CPU L2 cache, using byte as unit. 568 L2CacheSize int 569 570 // EnableRadixJoin indicates whether to use radix hash join to execute 571 // HashJoin. 572 EnableRadixJoin bool 573 574 // ConstraintCheckInPlace indicates whether to check the constraint when the ALLEGROALLEGROSQL executing. 575 ConstraintCheckInPlace bool 576 577 // CommandValue indicates which command current stochastik is doing. 578 CommandValue uint32 579 580 // MilevaDBOptJoinReorderThreshold defines the minimal number of join nodes 581 // to use the greedy join reorder algorithm. 582 MilevaDBOptJoinReorderThreshold int 583 584 // SlowQueryFile indicates which slow query log file for SLOW_QUERY causet to parse. 585 SlowQueryFile string 586 587 // EnableFastAnalyze indicates whether to take fast analyze. 588 EnableFastAnalyze bool 589 590 // TxnMode indicates should be pessimistic or optimistic. 591 TxnMode string 592 593 // LowResolutionTSO is used for reading data with low resolution TSO which is uFIDelated once every two seconds. 594 LowResolutionTSO bool 595 596 // MaxInterDircutionTime is the timeout for select memex, in milliseconds. 597 // If the value is 0, timeouts are not enabled. 598 // See https://dev.allegrosql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_execution_time 599 MaxInterDircutionTime uint64 600 601 // Killed is a flag to indicate that this query is killed. 602 Killed uint32 603 604 // ConnectionInfo indicates current connection info used by current stochastik, only be lazy assigned by plugin. 605 ConnectionInfo *ConnectionInfo 606 607 // use noop funcs or not 608 EnableNoopFuncs bool 609 610 // StartTime is the start time of the last query. 611 StartTime time.Time 612 613 // DurationParse is the duration of parsing ALLEGROALLEGROSQL string to AST of the last query. 614 DurationParse time.Duration 615 616 // DurationCompile is the duration of compiling AST to execution plan of the last query. 617 DurationCompile time.Duration 618 619 // RewritePhaseInfo records all information about the rewriting phase. 620 RewritePhaseInfo 621 622 // DurationOptimization is the duration of optimizing a query. 623 DurationOptimization time.Duration 624 625 // DurationWaitTS is the duration of waiting for a snapshot TS 626 DurationWaitTS time.Duration 627 628 // PrevStmt is used to causetstore the previous executed memex in the current stochastik. 629 PrevStmt fmt.Stringer 630 631 // prevStmtDigest is used to causetstore the digest of the previous memex in the current stochastik. 632 prevStmtDigest string 633 634 // AllowRemoveAutoInc indicates whether a user can drop the auto_increment defCausumn attribute or not. 635 AllowRemoveAutoInc bool 636 637 // UseCausetBaselines indicates whether we will use plan baselines to adjust plan. 638 UseCausetBaselines bool 639 640 // EvolveCausetBaselines indicates whether we will evolve the plan baselines. 641 EvolveCausetBaselines bool 642 643 // Unexported fields should be accessed and set through interfaces like GetReplicaRead() and SetReplicaRead(). 644 645 // allowInSubqToJoinAnPosetDagg can be set to false to forbid rewriting the semi join to inner join with agg. 646 allowInSubqToJoinAnPosetDagg bool 647 648 // EnableIndexMerge enables the generation of IndexMergePath. 
	enableIndexMerge bool

	// replicaRead is used for reading data from replicas, only follower is supported at this time.
	replicaRead ekv.ReplicaReadType

	// IsolationReadEngines is used for isolation reads; milevadb only reads from the stores whose engine type is in the engines.
	IsolationReadEngines map[ekv.StoreType]struct{}

	CausetAppendSelectBlockAsName []ast.HintBlock

	// LockWaitTimeout is the duration to wait for a pessimistic dagger, in milliseconds.
	// A negative value means nowait, 0 means the default behavior, and other values mean the actual wait time.
	LockWaitTimeout int64

	// MetricSchemaStep indicates the step when querying the metric schemaReplicant.
	MetricSchemaStep int64
	// MetricSchemaRangeDuration indicates the range duration when querying the metric schemaReplicant.
	MetricSchemaRangeDuration int64

	// Some data of cluster-level memory blocks will be retrieved many times in different inspection rules,
	// and the cost of retrieving some data is expensive. We use the `BlockSnapshot` to cache those data
	// and obtain them lazily, providing a consistent view of inspection blocks for each inspection rule.
	// All cached snapshots will be released at the end of retrieving.
	InspectionBlockCache map[string]BlockSnapshot

	// RowCausetEncoder is reused in the stochastik to encode event data.
	RowCausetEncoder rowcodec.CausetEncoder

	// SequenceState caches each sequence's latest value accessed by the lastval() builtin. It's a stochastik-scoped
	// variable, and all public methods of SequenceState are concurrency-safe.
	SequenceState *SequenceState

	// WindowingUseHighPrecision determines whether to compute window operations without loss of precision.
	// See https://dev.allegrosql.com/doc/refman/8.0/en/window-function-optimization.html for more details.
	WindowingUseHighPrecision bool

	// FoundInCausetCache indicates whether this memex was found in plan cache.
	FoundInCausetCache bool
	// PrevFoundInCausetCache indicates whether the last memex was found in plan cache.
	PrevFoundInCausetCache bool

	// OptimizerUseInvisibleIndexes indicates whether the optimizer can use invisible indexes.
	OptimizerUseInvisibleIndexes bool

	// SelectLimit limits the max row count of a select memex's output.
	SelectLimit uint64

	// EnableClusteredIndex indicates whether to enable the clustered index when creating a new causet.
	EnableClusteredIndex bool

	// PresumeKeyNotExists indicates lazy existence checking is enabled.
	PresumeKeyNotExists bool

	// EnableParallelApply indicates whether to use parallel apply.
	EnableParallelApply bool

	// ShardAllocateStep indicates the max size of continuous rowid shard in one transaction.
	ShardAllocateStep int64

	// EnableAmendPessimisticTxn indicates if schemaReplicant change amend is enabled for pessimistic transactions.
	EnableAmendPessimisticTxn bool

	// LastTxnInfo keeps track of the info of the last committed transaction.
	LastTxnInfo ekv.TxnInfo

	// PartitionPruneMode indicates how and when to prune partitions.
	PartitionPruneMode PartitionPruneMode
}

// UseDynamicPartitionPrune indicates whether to use the new dynamic partition prune.
func (s *StochastikVars) UseDynamicPartitionPrune() bool {
	return s.PartitionPruneMode == DynamicOnly
}

// PartitionPruneMode presents the prune mode used.
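// The recognized values are StaticOnly, DynamicOnly and StaticButPrepareDynamic (see the
// constants below); anything else fails Valid().
//
// A minimal usage sketch (not part of the original file; s is assumed to be a *StochastikVars):
//
//	mode := PartitionPruneMode("dynamic-only")
//	if !mode.Valid() {
//		mode = StaticOnly // fall back to a known mode
//	}
//	s.PartitionPruneMode = mode
//	_ = s.UseDynamicPartitionPrune() // true only for DynamicOnly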
724 type PartitionPruneMode string 725 726 const ( 727 // StaticOnly indicates only prune at plan phase. 728 StaticOnly PartitionPruneMode = "static-only" 729 // DynamicOnly indicates only prune at execute phase. 730 DynamicOnly PartitionPruneMode = "dynamic-only" 731 // StaticButPrepareDynamic indicates prune at plan phase but defCauslect stats need for dynamic prune. 732 StaticButPrepareDynamic PartitionPruneMode = "static-defCauslect-dynamic" 733 ) 734 735 // Valid indicate PruneMode is validated. 736 func (p PartitionPruneMode) Valid() bool { 737 switch p { 738 case StaticOnly, StaticButPrepareDynamic, DynamicOnly: 739 return true 740 default: 741 return false 742 } 743 } 744 745 // PreparedParams contains the parameters of the current prepared memex when executing it. 746 type PreparedParams []types.Causet 747 748 func (pps PreparedParams) String() string { 749 if len(pps) == 0 { 750 return "" 751 } 752 return " [arguments: " + types.CausetsToStrNoErr(pps) + "]" 753 } 754 755 // ConnectionInfo present connection used by audit. 756 type ConnectionInfo struct { 757 ConnectionID uint32 758 ConnectionType string 759 Host string 760 ClientIP string 761 ClientPort string 762 ServerID int 763 ServerPort int 764 Duration float64 765 User string 766 ServerOSLoginUser string 767 OSVersion string 768 ClientVersion string 769 ServerVersion string 770 SSLVersion string 771 PID int 772 EDB string 773 } 774 775 // NewStochastikVars creates a stochastik vars object. 776 func NewStochastikVars() *StochastikVars { 777 vars := &StochastikVars{ 778 Users: make(map[string]types.Causet), 779 systems: make(map[string]string), 780 PreparedStmts: make(map[uint32]interface{}), 781 PreparedStmtNameToID: make(map[string]uint32), 782 PreparedParams: make([]types.Causet, 0, 10), 783 TxnCtx: &TransactionContext{}, 784 RetryInfo: &RetryInfo{}, 785 ActiveRoles: make([]*auth.RoleIdentity, 0, 10), 786 StrictALLEGROSQLMode: true, 787 AutoIncrementIncrement: DefAutoIncrementIncrement, 788 AutoIncrementOffset: DefAutoIncrementOffset, 789 Status: allegrosql.ServerStatusAutocommit, 790 StmtCtx: new(stmtctx.StatementContext), 791 AllowAggPushDown: false, 792 AllowBCJ: false, 793 OptimizerSelectivityLevel: DefMilevaDBOptimizerSelectivityLevel, 794 RetryLimit: DefMilevaDBRetryLimit, 795 DisableTxnAutoRetry: DefMilevaDBDisableTxnAutoRetry, 796 DBSReorgPriority: ekv.PriorityLow, 797 allowInSubqToJoinAnPosetDagg: DefOptInSubqToJoinAnPosetDagg, 798 CorrelationThreshold: DefOptCorrelationThreshold, 799 CorrelationExpFactor: DefOptCorrelationExpFactor, 800 CPUFactor: DefOptCPUFactor, 801 CopCPUFactor: DefOptCopCPUFactor, 802 CopTiFlashConcurrencyFactor: DefOptTiFlashConcurrencyFactor, 803 NetworkFactor: DefOptNetworkFactor, 804 ScanFactor: DefOptScanFactor, 805 DescScanFactor: DefOptDescScanFactor, 806 SeekFactor: DefOptSeekFactor, 807 MemoryFactor: DefOptMemoryFactor, 808 DiskFactor: DefOptDiskFactor, 809 ConcurrencyFactor: DefOptConcurrencyFactor, 810 EnableRadixJoin: false, 811 EnableVectorizedExpression: DefEnableVectorizedExpression, 812 L2CacheSize: cpuid.CPU.Cache.L2, 813 CommandValue: uint32(allegrosql.ComSleep), 814 MilevaDBOptJoinReorderThreshold: DefMilevaDBOptJoinReorderThreshold, 815 SlowQueryFile: config.GetGlobalConfig().Log.SlowQueryFile, 816 WaitSplitRegionFinish: DefMilevaDBWaitSplitRegionFinish, 817 WaitSplitRegionTimeout: DefWaitSplitRegionTimeout, 818 enableIndexMerge: false, 819 EnableNoopFuncs: DefMilevaDBEnableNoopFuncs, 820 replicaRead: ekv.ReplicaReadLeader, 821 AllowRemoveAutoInc: 
DefMilevaDBAllowRemoveAutoInc, 822 UseCausetBaselines: DefMilevaDBUseCausetBaselines, 823 EvolveCausetBaselines: DefMilevaDBEvolveCausetBaselines, 824 IsolationReadEngines: make(map[ekv.StoreType]struct{}), 825 LockWaitTimeout: DefInnodbLockWaitTimeout * 1000, 826 MetricSchemaStep: DefMilevaDBMetricSchemaStep, 827 MetricSchemaRangeDuration: DefMilevaDBMetricSchemaRangeDuration, 828 SequenceState: NewSequenceState(), 829 WindowingUseHighPrecision: true, 830 PrevFoundInCausetCache: DefMilevaDBFoundInCausetCache, 831 FoundInCausetCache: DefMilevaDBFoundInCausetCache, 832 SelectLimit: math.MaxUint64, 833 AllowAutoRandExplicitInsert: DefMilevaDBAllowAutoRandExplicitInsert, 834 EnableClusteredIndex: DefMilevaDBEnableClusteredIndex, 835 EnableParallelApply: DefMilevaDBEnableParallelApply, 836 ShardAllocateStep: DefMilevaDBShardAllocateStep, 837 EnableChangeDeferredCausetType: DefMilevaDBChangeDeferredCausetType, 838 EnableAmendPessimisticTxn: DefMilevaDBEnableAmendPessimisticTxn, 839 } 840 vars.KVVars = ekv.NewVariables(&vars.Killed) 841 vars.Concurrency = Concurrency{ 842 indexLookupConcurrency: DefIndexLookupConcurrency, 843 indexSerialScanConcurrency: DefIndexSerialScanConcurrency, 844 indexLookupJoinConcurrency: DefIndexLookupJoinConcurrency, 845 hashJoinConcurrency: DefMilevaDBHashJoinConcurrency, 846 projectionConcurrency: DefMilevaDBProjectionConcurrency, 847 distALLEGROSQLScanConcurrency: DefDistALLEGROSQLScanConcurrency, 848 hashAggPartialConcurrency: DefMilevaDBHashAggPartialConcurrency, 849 hashAggFinalConcurrency: DefMilevaDBHashAggFinalConcurrency, 850 windowConcurrency: DefMilevaDBWindowConcurrency, 851 InterlockingDirectorateConcurrency: DefInterlockingDirectorateConcurrency, 852 } 853 vars.MemQuota = MemQuota{ 854 MemQuotaQuery: config.GetGlobalConfig().MemQuotaQuery, 855 NestedLoopJoinCacheCapacity: config.GetGlobalConfig().NestedLoopJoinCacheCapacity, 856 857 // The variables below do not take any effect anymore, it's remaining for compatibility. 
858 // TODO: remove them in v4.1 859 MemQuotaHashJoin: DefMilevaDBMemQuotaHashJoin, 860 MemQuotaMergeJoin: DefMilevaDBMemQuotaMergeJoin, 861 MemQuotaSort: DefMilevaDBMemQuotaSort, 862 MemQuotaTopn: DefMilevaDBMemQuotaTopn, 863 MemQuotaIndexLookupReader: DefMilevaDBMemQuotaIndexLookupReader, 864 MemQuotaIndexLookupJoin: DefMilevaDBMemQuotaIndexLookupJoin, 865 MemQuotaNestedLoopApply: DefMilevaDBMemQuotaNestedLoopApply, 866 MemQuotaDistALLEGROSQL: DefMilevaDBMemQuotaDistALLEGROSQL, 867 } 868 vars.BatchSize = BatchSize{ 869 IndexJoinBatchSize: DefIndexJoinBatchSize, 870 IndexLookupSize: DefIndexLookupSize, 871 InitChunkSize: DefInitChunkSize, 872 MaxChunkSize: DefMaxChunkSize, 873 } 874 vars.DMLBatchSize = DefDMLBatchSize 875 var enableStreaming string 876 if config.GetGlobalConfig().EnableStreaming { 877 enableStreaming = "1" 878 } else { 879 enableStreaming = "0" 880 } 881 terror.Log(vars.SetSystemVar(MilevaDBEnableStreaming, enableStreaming)) 882 883 vars.AllowBatchCop = DefMilevaDBAllowBatchCop 884 885 var enableChunkRPC string 886 if config.GetGlobalConfig().EinsteinDBClient.EnableChunkRPC { 887 enableChunkRPC = "1" 888 } else { 889 enableChunkRPC = "0" 890 } 891 terror.Log(vars.SetSystemVar(MilevaDBEnableChunkRPC, enableChunkRPC)) 892 for _, engine := range config.GetGlobalConfig().IsolationRead.Engines { 893 switch engine { 894 case ekv.TiFlash.Name(): 895 vars.IsolationReadEngines[ekv.TiFlash] = struct{}{} 896 case ekv.EinsteinDB.Name(): 897 vars.IsolationReadEngines[ekv.EinsteinDB] = struct{}{} 898 case ekv.MilevaDB.Name(): 899 vars.IsolationReadEngines[ekv.MilevaDB] = struct{}{} 900 } 901 } 902 return vars 903 } 904 905 // GetAllowInSubqToJoinAnPosetDagg get AllowInSubqToJoinAnPosetDagg from allegrosql hints and StochastikVars.allowInSubqToJoinAnPosetDagg. 906 func (s *StochastikVars) GetAllowInSubqToJoinAnPosetDagg() bool { 907 if s.StmtCtx.HasAllowInSubqToJoinAnPosetDaggHint { 908 return s.StmtCtx.AllowInSubqToJoinAnPosetDagg 909 } 910 return s.allowInSubqToJoinAnPosetDagg 911 } 912 913 // SetAllowInSubqToJoinAnPosetDagg set StochastikVars.allowInSubqToJoinAnPosetDagg. 914 func (s *StochastikVars) SetAllowInSubqToJoinAnPosetDagg(val bool) { 915 s.allowInSubqToJoinAnPosetDagg = val 916 } 917 918 // GetEnableCascadesCausetAppend get EnableCascadesCausetAppend from allegrosql hints and StochastikVars.EnableCascadesCausetAppend. 919 func (s *StochastikVars) GetEnableCascadesCausetAppend() bool { 920 if s.StmtCtx.HasEnableCascadesCausetAppendHint { 921 return s.StmtCtx.EnableCascadesCausetAppend 922 } 923 return s.EnableCascadesCausetAppend 924 } 925 926 // SetEnableCascadesCausetAppend set StochastikVars.EnableCascadesCausetAppend. 927 func (s *StochastikVars) SetEnableCascadesCausetAppend(val bool) { 928 s.EnableCascadesCausetAppend = val 929 } 930 931 // GetEnableIndexMerge get EnableIndexMerge from StochastikVars.enableIndexMerge. 932 func (s *StochastikVars) GetEnableIndexMerge() bool { 933 return s.enableIndexMerge 934 } 935 936 // SetEnableIndexMerge set StochastikVars.enableIndexMerge. 937 func (s *StochastikVars) SetEnableIndexMerge(val bool) { 938 s.enableIndexMerge = val 939 } 940 941 // GetReplicaRead get ReplicaRead from allegrosql hints and StochastikVars.replicaRead. 942 func (s *StochastikVars) GetReplicaRead() ekv.ReplicaReadType { 943 if s.StmtCtx.HasReplicaReadHint { 944 return ekv.ReplicaReadType(s.StmtCtx.ReplicaRead) 945 } 946 return s.replicaRead 947 } 948 949 // SetReplicaRead set StochastikVars.replicaRead. 
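// GetReplicaRead gives a memex-level hint (StmtCtx.ReplicaRead) precedence over this
// stochastik-scoped setting, so SetReplicaRead only changes the default used when no hint is
// present.
//
// A minimal usage sketch (not part of the original file; s is assumed to be a *StochastikVars):
//
//	s.SetReplicaRead(ekv.ReplicaReadFollower)
//	choice := s.GetReplicaRead() // ekv.ReplicaReadFollower unless a memex hint overrides it
//	_ = choice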
func (s *StochastikVars) SetReplicaRead(val ekv.ReplicaReadType) {
	s.replicaRead = val
}

// GetWriteStmtBufs gets a pointer to StochastikVars.writeStmtBufs.
func (s *StochastikVars) GetWriteStmtBufs() *WriteStmtBufs {
	return &s.writeStmtBufs
}

// GetSplitRegionTimeout gets the split region timeout.
func (s *StochastikVars) GetSplitRegionTimeout() time.Duration {
	return time.Duration(s.WaitSplitRegionTimeout) * time.Second
}

// GetIsolationReadEngines gets the isolation read engines.
func (s *StochastikVars) GetIsolationReadEngines() map[ekv.StoreType]struct{} {
	return s.IsolationReadEngines
}

// CleanBuffers cleans the temporary bufs.
func (s *StochastikVars) CleanBuffers() {
	s.GetWriteStmtBufs().clean()
}

// AllocCausetDeferredCausetID allocates a defCausumn id for the plan.
func (s *StochastikVars) AllocCausetDeferredCausetID() int64 {
	s.CausetDeferredCausetID++
	return s.CausetDeferredCausetID
}

// GetCharsetInfo gets charset and defCauslation for the current context.
// What character set should the server translate a memex to after receiving it?
// For this, the server uses the character_set_connection and defCauslation_connection system variables.
// It converts memexs sent by the client from character_set_client to character_set_connection
// (except for string literals that have an introducer such as _latin1 or _utf8).
// defCauslation_connection is important for comparisons of literal strings.
// For comparisons of strings with defCausumn values, defCauslation_connection does not matter because defCausumns
// have their own defCauslation, which has a higher defCauslation precedence.
// See https://dev.allegrosql.com/doc/refman/5.7/en/charset-connection.html
func (s *StochastikVars) GetCharsetInfo() (charset, defCauslation string) {
	charset = s.systems[CharacterSetConnection]
	defCauslation = s.systems[DefCauslationConnection]
	return
}

// SetUserVar sets the value and defCauslation for a user-defined variable.
func (s *StochastikVars) SetUserVar(varName string, svalue string, defCauslation string) {
	if len(defCauslation) > 0 {
		s.Users[varName] = types.NewDefCauslationStringCauset(stringutil.Copy(svalue), defCauslation, defCauslate.DefaultLen)
	} else {
		_, defCauslation = s.GetCharsetInfo()
		s.Users[varName] = types.NewDefCauslationStringCauset(stringutil.Copy(svalue), defCauslation, defCauslate.DefaultLen)
	}
}

// SetLastInsertID saves the last insert id to the stochastik context.
// TODO: we may causetstore the result for last_insert_id sys var later.
func (s *StochastikVars) SetLastInsertID(insertID uint64) {
	s.StmtCtx.LastInsertID = insertID
}

// SetStatusFlag sets the stochastik server status variable.
// If on is true, it sets the flag in the stochastik status;
// otherwise it removes the flag.
func (s *StochastikVars) SetStatusFlag(flag uint16, on bool) {
	if on {
		s.Status |= flag
		return
	}
	s.Status &= ^flag
}

// GetStatusFlag gets the stochastik server status variable, returns true if it is on.
func (s *StochastikVars) GetStatusFlag(flag uint16) bool {
	return s.Status&flag > 0
}

// InTxn returns whether the stochastik is in a transaction.
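// It is a thin wrapper over the bit flags kept in StochastikVars.Status, so it reflects
// whatever SetStatusFlag has recorded for allegrosql.ServerStatusInTrans.
//
// A minimal sketch of the flag mechanics (not part of the original file; s is assumed to be
// a *StochastikVars):
//
//	s.SetStatusFlag(allegrosql.ServerStatusInTrans, true)
//	_ = s.InTxn() // true
//	s.SetStatusFlag(allegrosql.ServerStatusInTrans, false)
//	_ = s.InTxn() // false
//	_ = s.IsAutocommit() // reads allegrosql.ServerStatusAutocommit the same way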
func (s *StochastikVars) InTxn() bool {
	return s.GetStatusFlag(allegrosql.ServerStatusInTrans)
}

// IsAutocommit returns whether the stochastik is set to autocommit.
func (s *StochastikVars) IsAutocommit() bool {
	return s.GetStatusFlag(allegrosql.ServerStatusAutocommit)
}

// IsReadConsistencyTxn returns true if the transaction is a read consistency (read committed) transaction.
func (s *StochastikVars) IsReadConsistencyTxn() bool {
	if s.TxnCtx.Isolation != "" {
		return s.TxnCtx.Isolation == ast.ReadCommitted
	}
	if s.txnIsolationLevelOneShot.state == oneShotUse {
		s.TxnCtx.Isolation = s.txnIsolationLevelOneShot.value
	}
	if s.TxnCtx.Isolation == "" {
		s.TxnCtx.Isolation, _ = s.GetSystemVar(TxnIsolation)
	}
	return s.TxnCtx.Isolation == ast.ReadCommitted
}

// SetTxnIsolationLevelOneShotStateForNextTxn sets the txnIsolationLevelOneShot.state for the next transaction.
func (s *StochastikVars) SetTxnIsolationLevelOneShotStateForNextTxn() {
	if isoLevelOneShot := &s.txnIsolationLevelOneShot; isoLevelOneShot.state != oneShotDef {
		switch isoLevelOneShot.state {
		case oneShotSet:
			isoLevelOneShot.state = oneShotUse
		case oneShotUse:
			isoLevelOneShot.state = oneShotDef
			isoLevelOneShot.value = ""
		}
	}
}

// IsPessimisticReadConsistency returns true if the memex is in a read consistency pessimistic transaction.
func (s *StochastikVars) IsPessimisticReadConsistency() bool {
	return s.TxnCtx.IsPessimistic && s.IsReadConsistencyTxn()
}

// GetNextPreparedStmtID generates and returns the next stochastik scope prepared memex id.
func (s *StochastikVars) GetNextPreparedStmtID() uint32 {
	s.preparedStmtID++
	return s.preparedStmtID
}

// Location returns the value of the time_zone stochastik variable. If it is nil, the system location is returned.
func (s *StochastikVars) Location() *time.Location {
	loc := s.TimeZone
	if loc == nil {
		loc = timeutil.SystemLocation()
	}
	return loc
}

// GetSystemVar gets the string value of a system variable.
func (s *StochastikVars) GetSystemVar(name string) (string, bool) {
	if name == WarningCount {
		return strconv.Itoa(s.SysWarningCount), true
	} else if name == ErrorCount {
		return strconv.Itoa(int(s.SysErrorCount)), true
	}
	val, ok := s.systems[name]
	return val, ok
}

func (s *StochastikVars) setDBSReorgPriority(val string) {
	val = strings.ToLower(val)
	switch val {
	case "priority_low":
		s.DBSReorgPriority = ekv.PriorityLow
	case "priority_normal":
		s.DBSReorgPriority = ekv.PriorityNormal
	case "priority_high":
		s.DBSReorgPriority = ekv.PriorityHigh
	default:
		s.DBSReorgPriority = ekv.PriorityLow
	}
}

// AddPreparedStmt adds the prepared memex to the current stochastik and increases the global count.
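// The global counter preparedStmtCount is shared by all stochastiks and is checked against
// the max_prepared_stmt_count system variable before the memex is registered; exceeding the
// limit returns ErrMaxPreparedStmtCountReached and leaves the counter net unchanged.
//
// A minimal bookkeeping sketch (not part of the original file; stmt stands for an already
// prepared memex object and error handling is elided):
//
//	id := s.GetNextPreparedStmtID()
//	_ = s.AddPreparedStmt(id, stmt) // registers and bumps the global gauge
//	s.RemovePreparedStmt(id)        // deregisters and decreases it again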
1110 func (s *StochastikVars) AddPreparedStmt(stmtID uint32, stmt interface{}) error { 1111 if _, exists := s.PreparedStmts[stmtID]; !exists { 1112 valStr, _ := s.GetSystemVar(MaxPreparedStmtCount) 1113 maxPreparedStmtCount, err := strconv.ParseInt(valStr, 10, 64) 1114 if err != nil { 1115 maxPreparedStmtCount = DefMaxPreparedStmtCount 1116 } 1117 newPreparedStmtCount := atomic.AddInt64(&preparedStmtCount, 1) 1118 if maxPreparedStmtCount >= 0 && newPreparedStmtCount > maxPreparedStmtCount { 1119 atomic.AddInt64(&preparedStmtCount, -1) 1120 return ErrMaxPreparedStmtCountReached.GenWithStackByArgs(maxPreparedStmtCount) 1121 } 1122 metrics.PreparedStmtGauge.Set(float64(newPreparedStmtCount)) 1123 } 1124 s.PreparedStmts[stmtID] = stmt 1125 return nil 1126 } 1127 1128 // RemovePreparedStmt removes preparedStmt from current stochastik and decrease count in global. 1129 func (s *StochastikVars) RemovePreparedStmt(stmtID uint32) { 1130 _, exists := s.PreparedStmts[stmtID] 1131 if !exists { 1132 return 1133 } 1134 delete(s.PreparedStmts, stmtID) 1135 afterMinus := atomic.AddInt64(&preparedStmtCount, -1) 1136 metrics.PreparedStmtGauge.Set(float64(afterMinus)) 1137 } 1138 1139 // WithdrawAllPreparedStmt remove all preparedStmt in current stochastik and decrease count in global. 1140 func (s *StochastikVars) WithdrawAllPreparedStmt() { 1141 psCount := len(s.PreparedStmts) 1142 if psCount == 0 { 1143 return 1144 } 1145 afterMinus := atomic.AddInt64(&preparedStmtCount, -int64(psCount)) 1146 metrics.PreparedStmtGauge.Set(float64(afterMinus)) 1147 } 1148 1149 // SetSystemVar sets the value of a system variable. 1150 func (s *StochastikVars) SetSystemVar(name string, val string) error { 1151 switch name { 1152 case TxnIsolationOneShot: 1153 switch val { 1154 case "SERIALIZABLE", "READ-UNCOMMITTED": 1155 skipIsolationLevelCheck, err := GetStochastikSystemVar(s, MilevaDBSkipIsolationLevelCheck) 1156 returnErr := ErrUnsupportedIsolationLevel.GenWithStackByArgs(val) 1157 if err != nil { 1158 returnErr = err 1159 } 1160 if !MilevaDBOptOn(skipIsolationLevelCheck) || err != nil { 1161 return returnErr 1162 } 1163 //SET TRANSACTION ISOLATION LEVEL will affect two internal variables: 1164 // 1. tx_isolation 1165 // 2. transaction_isolation 1166 // The following if condition is used to deduplicate two same warnings. 1167 if name == "transaction_isolation" { 1168 s.StmtCtx.AppendWarning(returnErr) 1169 } 1170 } 1171 s.txnIsolationLevelOneShot.state = oneShotSet 1172 s.txnIsolationLevelOneShot.value = val 1173 case TimeZone: 1174 tz, err := parseTimeZone(val) 1175 if err != nil { 1176 return err 1177 } 1178 s.TimeZone = tz 1179 case ALLEGROSQLModeVar: 1180 val = allegrosql.FormatALLEGROSQLModeStr(val) 1181 // Modes is a list of different modes separated by commas. 1182 sqlMode, err2 := allegrosql.GetALLEGROSQLMode(val) 1183 if err2 != nil { 1184 return errors.Trace(err2) 1185 } 1186 s.StrictALLEGROSQLMode = sqlMode.HasStrictMode() 1187 s.ALLEGROSQLMode = sqlMode 1188 s.SetStatusFlag(allegrosql.ServerStatusNoBackslashEscaped, sqlMode.HasNoBackslashEscapesMode()) 1189 case MilevaDBSnapshot: 1190 err := setSnapshotTS(s, val) 1191 if err != nil { 1192 return err 1193 } 1194 case AutoCommit: 1195 isAutocommit := MilevaDBOptOn(val) 1196 s.SetStatusFlag(allegrosql.ServerStatusAutocommit, isAutocommit) 1197 if isAutocommit { 1198 s.SetStatusFlag(allegrosql.ServerStatusInTrans, false) 1199 } 1200 case AutoIncrementIncrement: 1201 // AutoIncrementIncrement is valid in [1, 65535]. 
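		// A worked example of the pair (assuming the usual MySQL-compatible semantics, where
		// generated auto IDs follow roughly offset + N*increment): with auto_increment_increment=10
		// and auto_increment_offset=5 the stochastik generates 5, 15, 25, ...
		// milevadbOptPositiveInt32 is assumed to fall back to the default when val is not a valid
		// number in that range.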
1202 s.AutoIncrementIncrement = milevadbOptPositiveInt32(val, DefAutoIncrementIncrement) 1203 case AutoIncrementOffset: 1204 // AutoIncrementOffset is valid in [1, 65535]. 1205 s.AutoIncrementOffset = milevadbOptPositiveInt32(val, DefAutoIncrementOffset) 1206 case MaxInterDircutionTime: 1207 timeoutMS := milevadbOptPositiveInt32(val, 0) 1208 s.MaxInterDircutionTime = uint64(timeoutMS) 1209 case InnodbLockWaitTimeout: 1210 lockWaitSec := milevadbOptInt64(val, DefInnodbLockWaitTimeout) 1211 s.LockWaitTimeout = lockWaitSec * 1000 1212 case WindowingUseHighPrecision: 1213 s.WindowingUseHighPrecision = MilevaDBOptOn(val) 1214 case MilevaDBSkipUTF8Check: 1215 s.SkipUTF8Check = MilevaDBOptOn(val) 1216 case MilevaDBSkipASCIICheck: 1217 s.SkipASCIICheck = MilevaDBOptOn(val) 1218 case MilevaDBOptAggPushDown: 1219 s.AllowAggPushDown = MilevaDBOptOn(val) 1220 case MilevaDBOptBCJ: 1221 s.AllowBCJ = MilevaDBOptOn(val) 1222 case MilevaDBOptDistinctAggPushDown: 1223 s.AllowDistinctAggPushDown = MilevaDBOptOn(val) 1224 case MilevaDBOptWriteRowID: 1225 s.AllowWriteRowID = MilevaDBOptOn(val) 1226 case MilevaDBOptInSubqToJoinAnPosetDagg: 1227 s.SetAllowInSubqToJoinAnPosetDagg(MilevaDBOptOn(val)) 1228 case MilevaDBOptCorrelationThreshold: 1229 s.CorrelationThreshold = milevadbOptFloat64(val, DefOptCorrelationThreshold) 1230 case MilevaDBOptCorrelationExpFactor: 1231 s.CorrelationExpFactor = int(milevadbOptInt64(val, DefOptCorrelationExpFactor)) 1232 case MilevaDBOptCPUFactor: 1233 s.CPUFactor = milevadbOptFloat64(val, DefOptCPUFactor) 1234 case MilevaDBOptCopCPUFactor: 1235 s.CopCPUFactor = milevadbOptFloat64(val, DefOptCopCPUFactor) 1236 case MilevaDBOptTiFlashConcurrencyFactor: 1237 s.CopTiFlashConcurrencyFactor = milevadbOptFloat64(val, DefOptTiFlashConcurrencyFactor) 1238 case MilevaDBOptNetworkFactor: 1239 s.NetworkFactor = milevadbOptFloat64(val, DefOptNetworkFactor) 1240 case MilevaDBOptScanFactor: 1241 s.ScanFactor = milevadbOptFloat64(val, DefOptScanFactor) 1242 case MilevaDBOptDescScanFactor: 1243 s.DescScanFactor = milevadbOptFloat64(val, DefOptDescScanFactor) 1244 case MilevaDBOptSeekFactor: 1245 s.SeekFactor = milevadbOptFloat64(val, DefOptSeekFactor) 1246 case MilevaDBOptMemoryFactor: 1247 s.MemoryFactor = milevadbOptFloat64(val, DefOptMemoryFactor) 1248 case MilevaDBOptDiskFactor: 1249 s.DiskFactor = milevadbOptFloat64(val, DefOptDiskFactor) 1250 case MilevaDBOptConcurrencyFactor: 1251 s.ConcurrencyFactor = milevadbOptFloat64(val, DefOptConcurrencyFactor) 1252 case MilevaDBIndexLookupConcurrency: 1253 s.indexLookupConcurrency = milevadbOptPositiveInt32(val, ConcurrencyUnset) 1254 case MilevaDBIndexLookupJoinConcurrency: 1255 s.indexLookupJoinConcurrency = milevadbOptPositiveInt32(val, ConcurrencyUnset) 1256 case MilevaDBIndexJoinBatchSize: 1257 s.IndexJoinBatchSize = milevadbOptPositiveInt32(val, DefIndexJoinBatchSize) 1258 case MilevaDBAllowBatchCop: 1259 s.AllowBatchCop = int(milevadbOptInt64(val, DefMilevaDBAllowBatchCop)) 1260 case MilevaDBIndexLookupSize: 1261 s.IndexLookupSize = milevadbOptPositiveInt32(val, DefIndexLookupSize) 1262 case MilevaDBHashJoinConcurrency: 1263 s.hashJoinConcurrency = milevadbOptPositiveInt32(val, ConcurrencyUnset) 1264 case MilevaDBProjectionConcurrency: 1265 s.projectionConcurrency = milevadbOptPositiveInt32(val, ConcurrencyUnset) 1266 case MilevaDBHashAggPartialConcurrency: 1267 s.hashAggPartialConcurrency = milevadbOptPositiveInt32(val, ConcurrencyUnset) 1268 case MilevaDBHashAggFinalConcurrency: 1269 s.hashAggFinalConcurrency = milevadbOptPositiveInt32(val, 
ConcurrencyUnset) 1270 case MilevaDBWindowConcurrency: 1271 s.windowConcurrency = milevadbOptPositiveInt32(val, ConcurrencyUnset) 1272 case MilevaDBDistALLEGROSQLScanConcurrency: 1273 s.distALLEGROSQLScanConcurrency = milevadbOptPositiveInt32(val, DefDistALLEGROSQLScanConcurrency) 1274 case MilevaDBIndexSerialScanConcurrency: 1275 s.indexSerialScanConcurrency = milevadbOptPositiveInt32(val, DefIndexSerialScanConcurrency) 1276 case MilevaDBInterlockingDirectorateConcurrency: 1277 s.InterlockingDirectorateConcurrency = milevadbOptPositiveInt32(val, DefInterlockingDirectorateConcurrency) 1278 case MilevaDBBackoffLockFast: 1279 s.KVVars.BackoffLockFast = milevadbOptPositiveInt32(val, ekv.DefBackoffLockFast) 1280 case MilevaDBBackOffWeight: 1281 s.KVVars.BackOffWeight = milevadbOptPositiveInt32(val, ekv.DefBackOffWeight) 1282 case MilevaDBConstraintCheckInPlace: 1283 s.ConstraintCheckInPlace = MilevaDBOptOn(val) 1284 case MilevaDBBatchInsert: 1285 s.BatchInsert = MilevaDBOptOn(val) 1286 case MilevaDBBatchDelete: 1287 s.BatchDelete = MilevaDBOptOn(val) 1288 case MilevaDBBatchCommit: 1289 s.BatchCommit = MilevaDBOptOn(val) 1290 case MilevaDBDMLBatchSize: 1291 s.DMLBatchSize = int(milevadbOptInt64(val, DefOptCorrelationExpFactor)) 1292 case MilevaDBCurrentTS, MilevaDBLastTxnInfo, MilevaDBConfig: 1293 return ErrReadOnly 1294 case MilevaDBMaxChunkSize: 1295 s.MaxChunkSize = milevadbOptPositiveInt32(val, DefMaxChunkSize) 1296 case MilevaDBInitChunkSize: 1297 s.InitChunkSize = milevadbOptPositiveInt32(val, DefInitChunkSize) 1298 case MilevaDBMemQuotaQuery: 1299 s.MemQuotaQuery = milevadbOptInt64(val, config.GetGlobalConfig().MemQuotaQuery) 1300 case MilevaDBNestedLoopJoinCacheCapacity: 1301 s.NestedLoopJoinCacheCapacity = milevadbOptInt64(val, config.GetGlobalConfig().NestedLoopJoinCacheCapacity) 1302 case MilevaDBMemQuotaHashJoin: 1303 s.MemQuotaHashJoin = milevadbOptInt64(val, DefMilevaDBMemQuotaHashJoin) 1304 case MilevaDBMemQuotaMergeJoin: 1305 s.MemQuotaMergeJoin = milevadbOptInt64(val, DefMilevaDBMemQuotaMergeJoin) 1306 case MilevaDBMemQuotaSort: 1307 s.MemQuotaSort = milevadbOptInt64(val, DefMilevaDBMemQuotaSort) 1308 case MilevaDBMemQuotaTopn: 1309 s.MemQuotaTopn = milevadbOptInt64(val, DefMilevaDBMemQuotaTopn) 1310 case MilevaDBMemQuotaIndexLookupReader: 1311 s.MemQuotaIndexLookupReader = milevadbOptInt64(val, DefMilevaDBMemQuotaIndexLookupReader) 1312 case MilevaDBMemQuotaIndexLookupJoin: 1313 s.MemQuotaIndexLookupJoin = milevadbOptInt64(val, DefMilevaDBMemQuotaIndexLookupJoin) 1314 case MilevaDBMemQuotaNestedLoopApply: 1315 s.MemQuotaNestedLoopApply = milevadbOptInt64(val, DefMilevaDBMemQuotaNestedLoopApply) 1316 case MilevaDBGeneralLog: 1317 atomic.StoreUint32(&ProcessGeneralLog, uint32(milevadbOptPositiveInt32(val, DefMilevaDBGeneralLog))) 1318 case MilevaDBPProfALLEGROSQLCPU: 1319 EnablePProfALLEGROSQLCPU.CausetStore(uint32(milevadbOptPositiveInt32(val, DefMilevaDBPProfALLEGROSQLCPU)) > 0) 1320 case MilevaDBDBSSlowOprThreshold: 1321 atomic.StoreUint32(&DBSSlowOprThreshold, uint32(milevadbOptPositiveInt32(val, DefMilevaDBDBSSlowOprThreshold))) 1322 case MilevaDBRetryLimit: 1323 s.RetryLimit = milevadbOptInt64(val, DefMilevaDBRetryLimit) 1324 case MilevaDBDisableTxnAutoRetry: 1325 s.DisableTxnAutoRetry = MilevaDBOptOn(val) 1326 case MilevaDBEnableStreaming: 1327 s.EnableStreaming = MilevaDBOptOn(val) 1328 case MilevaDBEnableChunkRPC: 1329 s.EnableChunkRPC = MilevaDBOptOn(val) 1330 case MilevaDBEnableCascadesCausetAppend: 1331 s.SetEnableCascadesCausetAppend(MilevaDBOptOn(val)) 1332 case 
MilevaDBOptimizerSelectivityLevel: 1333 s.OptimizerSelectivityLevel = milevadbOptPositiveInt32(val, DefMilevaDBOptimizerSelectivityLevel) 1334 case MilevaDBEnableBlockPartition: 1335 s.EnableBlockPartition = val 1336 case MilevaDBDBSReorgPriority: 1337 s.setDBSReorgPriority(val) 1338 case MilevaDBForcePriority: 1339 atomic.StoreInt32(&ForcePriority, int32(allegrosql.Str2Priority(val))) 1340 case MilevaDBEnableRadixJoin: 1341 s.EnableRadixJoin = MilevaDBOptOn(val) 1342 case MilevaDBEnableWindowFunction: 1343 s.EnableWindowFunction = MilevaDBOptOn(val) 1344 case MilevaDBEnableVectorizedExpression: 1345 s.EnableVectorizedExpression = MilevaDBOptOn(val) 1346 case MilevaDBOptJoinReorderThreshold: 1347 s.MilevaDBOptJoinReorderThreshold = milevadbOptPositiveInt32(val, DefMilevaDBOptJoinReorderThreshold) 1348 case MilevaDBSlowQueryFile: 1349 s.SlowQueryFile = val 1350 case MilevaDBEnableFastAnalyze: 1351 s.EnableFastAnalyze = MilevaDBOptOn(val) 1352 case MilevaDBWaitSplitRegionFinish: 1353 s.WaitSplitRegionFinish = MilevaDBOptOn(val) 1354 case MilevaDBWaitSplitRegionTimeout: 1355 s.WaitSplitRegionTimeout = uint64(milevadbOptPositiveInt32(val, DefWaitSplitRegionTimeout)) 1356 case MilevaDBExpensiveQueryTimeThreshold: 1357 atomic.StoreUint64(&ExpensiveQueryTimeThreshold, uint64(milevadbOptPositiveInt32(val, DefMilevaDBExpensiveQueryTimeThreshold))) 1358 case MilevaDBTxnMode: 1359 s.TxnMode = strings.ToUpper(val) 1360 case MilevaDBRowFormatVersion: 1361 formatVersion := int(milevadbOptInt64(val, DefMilevaDBRowFormatV1)) 1362 if formatVersion == DefMilevaDBRowFormatV1 { 1363 s.RowCausetEncoder.Enable = false 1364 } else if formatVersion == DefMilevaDBRowFormatV2 { 1365 s.RowCausetEncoder.Enable = true 1366 } 1367 case MilevaDBLowResolutionTSO: 1368 s.LowResolutionTSO = MilevaDBOptOn(val) 1369 case MilevaDBEnableIndexMerge: 1370 s.SetEnableIndexMerge(MilevaDBOptOn(val)) 1371 case MilevaDBEnableNoopFuncs: 1372 s.EnableNoopFuncs = MilevaDBOptOn(val) 1373 case MilevaDBReplicaRead: 1374 if strings.EqualFold(val, "follower") { 1375 s.SetReplicaRead(ekv.ReplicaReadFollower) 1376 } else if strings.EqualFold(val, "leader-and-follower") { 1377 s.SetReplicaRead(ekv.ReplicaReadMixed) 1378 } else if strings.EqualFold(val, "leader") || len(val) == 0 { 1379 s.SetReplicaRead(ekv.ReplicaReadLeader) 1380 } 1381 case MilevaDBAllowRemoveAutoInc: 1382 s.AllowRemoveAutoInc = MilevaDBOptOn(val) 1383 // It's a global variable, but it also wants to be cached in server. 
1384 case MilevaDBMaxDeltaSchemaCount: 1385 SetMaxDeltaSchemaCount(milevadbOptInt64(val, DefMilevaDBMaxDeltaSchemaCount)) 1386 case MilevaDBUseCausetBaselines: 1387 s.UseCausetBaselines = MilevaDBOptOn(val) 1388 case MilevaDBEvolveCausetBaselines: 1389 s.EvolveCausetBaselines = MilevaDBOptOn(val) 1390 case MilevaDBIsolationReadEngines: 1391 s.IsolationReadEngines = make(map[ekv.StoreType]struct{}) 1392 for _, engine := range strings.Split(val, ",") { 1393 switch engine { 1394 case ekv.EinsteinDB.Name(): 1395 s.IsolationReadEngines[ekv.EinsteinDB] = struct{}{} 1396 case ekv.TiFlash.Name(): 1397 s.IsolationReadEngines[ekv.TiFlash] = struct{}{} 1398 case ekv.MilevaDB.Name(): 1399 s.IsolationReadEngines[ekv.MilevaDB] = struct{}{} 1400 } 1401 } 1402 case MilevaDBStoreLimit: 1403 storeutil.StoreLimit.CausetStore(milevadbOptInt64(val, DefMilevaDBStoreLimit)) 1404 case MilevaDBMetricSchemaStep: 1405 s.MetricSchemaStep = milevadbOptInt64(val, DefMilevaDBMetricSchemaStep) 1406 case MilevaDBMetricSchemaRangeDuration: 1407 s.MetricSchemaRangeDuration = milevadbOptInt64(val, DefMilevaDBMetricSchemaRangeDuration) 1408 case DefCauslationConnection, DefCauslationDatabase, DefCauslationServer: 1409 if _, err := defCauslate.GetDefCauslationByName(val); err != nil { 1410 var ok bool 1411 var charsetVal string 1412 var err2 error 1413 if name == DefCauslationConnection { 1414 charsetVal, ok = s.systems[CharacterSetConnection] 1415 } else if name == DefCauslationDatabase { 1416 charsetVal, ok = s.systems[CharsetDatabase] 1417 } else { 1418 // DefCauslationServer 1419 charsetVal, ok = s.systems[CharacterSetServer] 1420 } 1421 if !ok { 1422 return err 1423 } 1424 val, err2 = charset.GetDefaultDefCauslation(charsetVal) 1425 if err2 != nil { 1426 return err2 1427 } 1428 logutil.BgLogger().Warn(err.Error()) 1429 } 1430 case MilevaDBSlowLogThreshold: 1431 atomic.StoreUint64(&config.GetGlobalConfig().Log.SlowThreshold, uint64(milevadbOptInt64(val, logutil.DefaultSlowThreshold))) 1432 case MilevaDBRecordCausetInSlowLog: 1433 atomic.StoreUint32(&config.GetGlobalConfig().Log.RecordCausetInSlowLog, uint32(milevadbOptInt64(val, logutil.DefaultRecordCausetInSlowLog))) 1434 case MilevaDBEnableSlowLog: 1435 config.GetGlobalConfig().Log.EnableSlowLog = MilevaDBOptOn(val) 1436 case MilevaDBQueryLogMaxLen: 1437 atomic.StoreUint64(&config.GetGlobalConfig().Log.QueryLogMaxLen, uint64(milevadbOptInt64(val, logutil.DefaultQueryLogMaxLen))) 1438 case MilevaDBCheckMb4ValueInUTF8: 1439 config.GetGlobalConfig().CheckMb4ValueInUTF8 = MilevaDBOptOn(val) 1440 case MilevaDBFoundInCausetCache: 1441 s.FoundInCausetCache = MilevaDBOptOn(val) 1442 case MilevaDBEnableDefCauslectInterDircutionInfo: 1443 config.GetGlobalConfig().EnableDefCauslectInterDircutionInfo = MilevaDBOptOn(val) 1444 case ALLEGROSQLSelectLimit: 1445 result, err := strconv.ParseUint(val, 10, 64) 1446 if err != nil { 1447 return errors.Trace(err) 1448 } 1449 s.SelectLimit = result 1450 case MilevaDBAllowAutoRandExplicitInsert: 1451 s.AllowAutoRandExplicitInsert = MilevaDBOptOn(val) 1452 case MilevaDBEnableClusteredIndex: 1453 s.EnableClusteredIndex = MilevaDBOptOn(val) 1454 case MilevaDBPartitionPruneMode: 1455 s.PartitionPruneMode = PartitionPruneMode(strings.ToLower(strings.TrimSpace(val))) 1456 case MilevaDBEnableParallelApply: 1457 s.EnableParallelApply = MilevaDBOptOn(val) 1458 case MilevaDBSlowLogMasking, MilevaDBRedactLog: 1459 config.SetRedactLog(MilevaDBOptOn(val)) 1460 case MilevaDBShardAllocateStep: 1461 s.ShardAllocateStep = milevadbOptInt64(val, 
DefMilevaDBShardAllocateStep) 1462 case MilevaDBEnableChangeDeferredCausetType: 1463 s.EnableChangeDeferredCausetType = MilevaDBOptOn(val) 1464 case MilevaDBEnableAmendPessimisticTxn: 1465 s.EnableAmendPessimisticTxn = MilevaDBOptOn(val) 1466 } 1467 s.systems[name] = val 1468 return nil 1469 } 1470 1471 // GetReadableTxnMode returns the stochastik variable TxnMode but rewrites it to "OPTIMISTIC" when it's empty. 1472 func (s *StochastikVars) GetReadableTxnMode() string { 1473 txnMode := s.TxnMode 1474 if txnMode == "" { 1475 txnMode = ast.Optimistic 1476 } 1477 return txnMode 1478 } 1479 1480 func (s *StochastikVars) setTxnMode(val string) error { 1481 switch strings.ToUpper(val) { 1482 case ast.Pessimistic: 1483 s.TxnMode = ast.Pessimistic 1484 case ast.Optimistic: 1485 s.TxnMode = ast.Optimistic 1486 case "": 1487 s.TxnMode = "" 1488 default: 1489 return ErrWrongValueForVar.FastGenByArgs(MilevaDBTxnMode, val) 1490 } 1491 return nil 1492 } 1493 1494 // SetPrevStmtDigest sets the digest of the previous memex. 1495 func (s *StochastikVars) SetPrevStmtDigest(prevStmtDigest string) { 1496 s.prevStmtDigest = prevStmtDigest 1497 } 1498 1499 // GetPrevStmtDigest returns the digest of the previous memex. 1500 func (s *StochastikVars) GetPrevStmtDigest() string { 1501 // Because `prevStmt` may be truncated, it's senseless to normalize it. 1502 // Even if `prevStmtDigest` is empty but `prevStmt` is not, just return it anyway. 1503 return s.prevStmtDigest 1504 } 1505 1506 // LazyCheckKeyNotExists returns whether the key-not-exists check can be performed lazily. 1507 func (s *StochastikVars) LazyCheckKeyNotExists() bool { 1508 return s.PresumeKeyNotExists || (s.TxnCtx.IsPessimistic && !s.StmtCtx.DupKeyAsWarning) 1509 } 1510 1511 // SetLocalSystemVar sets values of the local variables which are in "server" scope. 1512 func SetLocalSystemVar(name string, val string) { 1513 switch name { 1514 case MilevaDBDBSReorgWorkerCount: 1515 SetDBSReorgWorkerCounter(int32(milevadbOptPositiveInt32(val, DefMilevaDBDBSReorgWorkerCount))) 1516 case MilevaDBDBSReorgBatchSize: 1517 SetDBSReorgBatchSize(int32(milevadbOptPositiveInt32(val, DefMilevaDBDBSReorgBatchSize))) 1518 case MilevaDBDBSErrorCountLimit: 1519 SetDBSErrorCountLimit(milevadbOptInt64(val, DefMilevaDBDBSErrorCountLimit)) 1520 } 1521 } 1522 1523 // special stochastik variables. 1524 const ( 1525 ALLEGROSQLModeVar = "sql_mode" 1526 CharacterSetResults = "character_set_results" 1527 MaxAllowedPacket = "max_allowed_packet" 1528 TimeZone = "time_zone" 1529 TxnIsolation = "tx_isolation" 1530 TransactionIsolation = "transaction_isolation" 1531 TxnIsolationOneShot = "tx_isolation_one_shot" 1532 MaxInterDircutionTime = "max_execution_time" 1533 ) 1534 1535 // These variables are useless for MilevaDB, but we still need to validate their values for compatibility reasons. 1536 // TODO: some more variables need to be added here. 1537 const ( 1538 serverReadOnly = "read_only" 1539 ) 1540 1541 var ( 1542 // TxIsolationNames are the valid values of the variable "tx_isolation" or "transaction_isolation". 1543 TxIsolationNames = map[string]struct{}{ 1544 "READ-UNCOMMITTED": {}, 1545 "READ-COMMITTED": {}, 1546 "REPEATABLE-READ": {}, 1547 "SERIALIZABLE": {}, 1548 } 1549 ) 1550 1551 // BlockDelta stands for the changed count for one causet or partition. 1552 type BlockDelta struct { 1553 Delta int64 1554 Count int64 1555 DefCausSize map[int64]int64 1556 InitTime time.Time // InitTime is the time that this delta is generated.
1557 } 1558 1559 // ConcurrencyUnset means the value of the concurrency-related variable is unset. 1560 const ConcurrencyUnset = -1 1561 1562 // Concurrency defines concurrency values. 1563 type Concurrency struct { 1564 // indexLookupConcurrency is the number of concurrent index lookup worker. 1565 // indexLookupConcurrency is deprecated, use InterlockingDirectorateConcurrency instead. 1566 indexLookupConcurrency int 1567 1568 // indexLookupJoinConcurrency is the number of concurrent index lookup join inner worker. 1569 // indexLookupJoinConcurrency is deprecated, use InterlockingDirectorateConcurrency instead. 1570 indexLookupJoinConcurrency int 1571 1572 // distALLEGROSQLScanConcurrency is the number of concurrent dist ALLEGROALLEGROSQL scan worker. 1573 // distALLEGROSQLScanConcurrency is deprecated, use InterlockingDirectorateConcurrency instead. 1574 distALLEGROSQLScanConcurrency int 1575 1576 // hashJoinConcurrency is the number of concurrent hash join outer worker. 1577 // hashJoinConcurrency is deprecated, use InterlockingDirectorateConcurrency instead. 1578 hashJoinConcurrency int 1579 1580 // projectionConcurrency is the number of concurrent projection worker. 1581 // projectionConcurrency is deprecated, use InterlockingDirectorateConcurrency instead. 1582 projectionConcurrency int 1583 1584 // hashAggPartialConcurrency is the number of concurrent hash aggregation partial worker. 1585 // hashAggPartialConcurrency is deprecated, use InterlockingDirectorateConcurrency instead. 1586 hashAggPartialConcurrency int 1587 1588 // hashAggFinalConcurrency is the number of concurrent hash aggregation final worker. 1589 // hashAggFinalConcurrency is deprecated, use InterlockingDirectorateConcurrency instead. 1590 hashAggFinalConcurrency int 1591 1592 // windowConcurrency is the number of concurrent window worker. 1593 // windowConcurrency is deprecated, use InterlockingDirectorateConcurrency instead. 1594 windowConcurrency int 1595 1596 // indexSerialScanConcurrency is the number of concurrent index serial scan worker. 1597 indexSerialScanConcurrency int 1598 1599 // InterlockingDirectorateConcurrency is the number of concurrent worker for all interlocks. 1600 InterlockingDirectorateConcurrency int 1601 } 1602 1603 // SetIndexLookupConcurrency set the number of concurrent index lookup worker. 1604 func (c *Concurrency) SetIndexLookupConcurrency(n int) { 1605 c.indexLookupConcurrency = n 1606 } 1607 1608 // SetIndexLookupJoinConcurrency set the number of concurrent index lookup join inner worker. 1609 func (c *Concurrency) SetIndexLookupJoinConcurrency(n int) { 1610 c.indexLookupJoinConcurrency = n 1611 } 1612 1613 // SetDistALLEGROSQLScanConcurrency set the number of concurrent dist ALLEGROALLEGROSQL scan worker. 1614 func (c *Concurrency) SetDistALLEGROSQLScanConcurrency(n int) { 1615 c.distALLEGROSQLScanConcurrency = n 1616 } 1617 1618 // SetHashJoinConcurrency set the number of concurrent hash join outer worker. 1619 func (c *Concurrency) SetHashJoinConcurrency(n int) { 1620 c.hashJoinConcurrency = n 1621 } 1622 1623 // SetProjectionConcurrency set the number of concurrent projection worker. 1624 func (c *Concurrency) SetProjectionConcurrency(n int) { 1625 c.projectionConcurrency = n 1626 } 1627 1628 // SetHashAggPartialConcurrency set the number of concurrent hash aggregation partial worker.
1629 func (c *Concurrency) SetHashAggPartialConcurrency(n int) { 1630 c.hashAggPartialConcurrency = n 1631 } 1632 1633 // SetHashAggFinalConcurrency set the number of concurrent hash aggregation final worker. 1634 func (c *Concurrency) SetHashAggFinalConcurrency(n int) { 1635 c.hashAggFinalConcurrency = n 1636 } 1637 1638 // SetWindowConcurrency set the number of concurrent window worker. 1639 func (c *Concurrency) SetWindowConcurrency(n int) { 1640 c.windowConcurrency = n 1641 } 1642 1643 // SetIndexSerialScanConcurrency set the number of concurrent index serial scan worker. 1644 func (c *Concurrency) SetIndexSerialScanConcurrency(n int) { 1645 c.indexSerialScanConcurrency = n 1646 } 1647 1648 // IndexLookupConcurrency return the number of concurrent index lookup worker. 1649 func (c *Concurrency) IndexLookupConcurrency() int { 1650 if c.indexLookupConcurrency != ConcurrencyUnset { 1651 return c.indexLookupConcurrency 1652 } 1653 return c.InterlockingDirectorateConcurrency 1654 } 1655 1656 // IndexLookupJoinConcurrency return the number of concurrent index lookup join inner worker. 1657 func (c *Concurrency) IndexLookupJoinConcurrency() int { 1658 if c.indexLookupJoinConcurrency != ConcurrencyUnset { 1659 return c.indexLookupJoinConcurrency 1660 } 1661 return c.InterlockingDirectorateConcurrency 1662 } 1663 1664 // DistALLEGROSQLScanConcurrency return the number of concurrent dist ALLEGROALLEGROSQL scan worker. 1665 func (c *Concurrency) DistALLEGROSQLScanConcurrency() int { 1666 return c.distALLEGROSQLScanConcurrency 1667 } 1668 1669 // HashJoinConcurrency return the number of concurrent hash join outer worker. 1670 func (c *Concurrency) HashJoinConcurrency() int { 1671 if c.hashJoinConcurrency != ConcurrencyUnset { 1672 return c.hashJoinConcurrency 1673 } 1674 return c.InterlockingDirectorateConcurrency 1675 } 1676 1677 // ProjectionConcurrency return the number of concurrent projection worker. 1678 func (c *Concurrency) ProjectionConcurrency() int { 1679 if c.projectionConcurrency != ConcurrencyUnset { 1680 return c.projectionConcurrency 1681 } 1682 return c.InterlockingDirectorateConcurrency 1683 } 1684 1685 // HashAggPartialConcurrency return the number of concurrent hash aggregation partial worker. 1686 func (c *Concurrency) HashAggPartialConcurrency() int { 1687 if c.hashAggPartialConcurrency != ConcurrencyUnset { 1688 return c.hashAggPartialConcurrency 1689 } 1690 return c.InterlockingDirectorateConcurrency 1691 } 1692 1693 // HashAggFinalConcurrency return the number of concurrent hash aggregation final worker. 1694 func (c *Concurrency) HashAggFinalConcurrency() int { 1695 if c.hashAggFinalConcurrency != ConcurrencyUnset { 1696 return c.hashAggFinalConcurrency 1697 } 1698 return c.InterlockingDirectorateConcurrency 1699 } 1700 1701 // WindowConcurrency return the number of concurrent window worker. 1702 func (c *Concurrency) WindowConcurrency() int { 1703 if c.windowConcurrency != ConcurrencyUnset { 1704 return c.windowConcurrency 1705 } 1706 return c.InterlockingDirectorateConcurrency 1707 } 1708 1709 // IndexSerialScanConcurrency return the number of concurrent index serial scan worker. 1710 // This option is not sync with InterlockingDirectorateConcurrency since it's used by Analyze causet. 1711 func (c *Concurrency) IndexSerialScanConcurrency() int { 1712 return c.indexSerialScanConcurrency 1713 } 1714 1715 // UnionConcurrency return the num of concurrent union worker. 
1716 func (c *Concurrency) UnionConcurrency() int { 1717 return c.InterlockingDirectorateConcurrency 1718 } 1719 1720 // MemQuota defines memory quota values. 1721 type MemQuota struct { 1722 // MemQuotaQuery defines the memory quota for a query. 1723 MemQuotaQuery int64 1724 1725 // NestedLoopJoinCacheCapacity defines the memory capacity for apply cache. 1726 NestedLoopJoinCacheCapacity int64 1727 1728 // The variables below do not take effect anymore; they remain only for compatibility. 1729 // TODO: remove them in v4.1 1730 // MemQuotaHashJoin defines the memory quota for a hash join interlock. 1731 MemQuotaHashJoin int64 1732 // MemQuotaMergeJoin defines the memory quota for a merge join interlock. 1733 MemQuotaMergeJoin int64 1734 // MemQuotaSort defines the memory quota for a sort interlock. 1735 MemQuotaSort int64 1736 // MemQuotaTopn defines the memory quota for a top n interlock. 1737 MemQuotaTopn int64 1738 // MemQuotaIndexLookupReader defines the memory quota for an index lookup reader interlock. 1739 MemQuotaIndexLookupReader int64 1740 // MemQuotaIndexLookupJoin defines the memory quota for an index lookup join interlock. 1741 MemQuotaIndexLookupJoin int64 1742 // MemQuotaNestedLoopApply defines the memory quota for a nested loop apply interlock. 1743 MemQuotaNestedLoopApply int64 1744 // MemQuotaDistALLEGROSQL defines the memory quota for all operators in DistALLEGROSQL layer like co-processor and selectResult. 1745 MemQuotaDistALLEGROSQL int64 1746 } 1747 1748 // BatchSize defines batch size values. 1749 type BatchSize struct { 1750 // IndexJoinBatchSize is the batch size of an index lookup join. 1751 IndexJoinBatchSize int 1752 1753 // IndexLookupSize is the number of handles for an index lookup task in index double read interlock. 1754 IndexLookupSize int 1755 1756 // InitChunkSize defines the initial event count of a Chunk during query execution. 1757 InitChunkSize int 1758 1759 // MaxChunkSize defines the max event count of a Chunk during query execution. 1760 MaxChunkSize int 1761 } 1762 1763 const ( 1764 // SlowLogRowPrefixStr is slow log event prefix. 1765 SlowLogRowPrefixStr = "# " 1766 // SlowLogSpaceMarkStr is slow log space mark. 1767 SlowLogSpaceMarkStr = ": " 1768 // SlowLogALLEGROSQLSuffixStr is slow log suffix. 1769 SlowLogALLEGROSQLSuffixStr = ";" 1770 // SlowLogTimeStr is slow log field name. 1771 SlowLogTimeStr = "Time" 1772 // SlowLogStartPrefixStr is slow log start event prefix. 1773 SlowLogStartPrefixStr = SlowLogRowPrefixStr + SlowLogTimeStr + SlowLogSpaceMarkStr 1774 // SlowLogTxnStartTSStr is slow log field name. 1775 SlowLogTxnStartTSStr = "Txn_start_ts" 1776 // SlowLogUserAndHostStr is the user and host field name, which is compatible with MyALLEGROSQL. 1777 SlowLogUserAndHostStr = "User@Host" 1778 // SlowLogUserStr is slow log field name. 1779 SlowLogUserStr = "User" 1780 // SlowLogHostStr only for slow_query causet usage. 1781 SlowLogHostStr = "Host" 1782 // SlowLogConnIDStr is slow log field name. 1783 SlowLogConnIDStr = "Conn_ID" 1784 // SlowLogQueryTimeStr is slow log field name. 1785 SlowLogQueryTimeStr = "Query_time" 1786 // SlowLogParseTimeStr is the parse allegrosql time. 1787 SlowLogParseTimeStr = "Parse_time" 1788 // SlowLogCompileTimeStr is the compile plan time. 1789 SlowLogCompileTimeStr = "Compile_time" 1790 // SlowLogRewriteTimeStr is the rewrite time. 1791 SlowLogRewriteTimeStr = "Rewrite_time" 1792 // SlowLogOptimizeTimeStr is the optimization time.
1793 SlowLogOptimizeTimeStr = "Optimize_time" 1794 // SlowLogWaitTSTimeStr is the time of waiting TS. 1795 SlowLogWaitTSTimeStr = "Wait_TS" 1796 // SlowLogPreprocSubQueriesStr is the number of pre-processed sub-queries. 1797 SlowLogPreprocSubQueriesStr = "Preproc_subqueries" 1798 // SlowLogPreProcSubQueryTimeStr is the total time of pre-processing sub-queries. 1799 SlowLogPreProcSubQueryTimeStr = "Preproc_subqueries_time" 1800 // SlowLogDBStr is slow log field name. 1801 SlowLogDBStr = "EDB" 1802 // SlowLogIsInternalStr is slow log field name. 1803 SlowLogIsInternalStr = "Is_internal" 1804 // SlowLogIndexNamesStr is slow log field name. 1805 SlowLogIndexNamesStr = "Index_names" 1806 // SlowLogDigestStr is slow log field name. 1807 SlowLogDigestStr = "Digest" 1808 // SlowLogQueryALLEGROSQLStr is slow log field name. 1809 SlowLogQueryALLEGROSQLStr = "Query" // use for slow log causet, slow log will not print this field name but print allegrosql directly. 1810 // SlowLogStatsInfoStr is plan stats info. 1811 SlowLogStatsInfoStr = "Stats" 1812 // SlowLogNumCausetTasksStr is the number of cop-tasks. 1813 SlowLogNumCausetTasksStr = "Num_cop_tasks" 1814 // SlowLogCopProcAvg is the average process time of all cop-tasks. 1815 SlowLogCopProcAvg = "Cop_proc_avg" 1816 // SlowLogCopProcP90 is the p90 process time of all cop-tasks. 1817 SlowLogCopProcP90 = "Cop_proc_p90" 1818 // SlowLogCopProcMax is the max process time of all cop-tasks. 1819 SlowLogCopProcMax = "Cop_proc_max" 1820 // SlowLogCopProcAddr is the address of the EinsteinDB instance where the cop-task with the max process time ran. 1821 SlowLogCopProcAddr = "Cop_proc_addr" 1822 // SlowLogCopWaitAvg is the average wait time of all cop-tasks. 1823 SlowLogCopWaitAvg = "Cop_wait_avg" 1824 // SlowLogCopWaitP90 is the p90 wait time of all cop-tasks. 1825 SlowLogCopWaitP90 = "Cop_wait_p90" 1826 // SlowLogCopWaitMax is the max wait time of all cop-tasks. 1827 SlowLogCopWaitMax = "Cop_wait_max" 1828 // SlowLogCopWaitAddr is the address of the EinsteinDB instance where the cop-task with the max wait time ran. 1829 SlowLogCopWaitAddr = "Cop_wait_addr" 1830 // SlowLogCopBackoffPrefix contains backoff information. 1831 SlowLogCopBackoffPrefix = "Cop_backoff_" 1832 // SlowLogMemMax is the max number of bytes of memory used in this memex. 1833 SlowLogMemMax = "Mem_max" 1834 // SlowLogDiskMax is the max number of bytes of disk used in this memex. 1835 SlowLogDiskMax = "Disk_max" 1836 // SlowLogPrepared is used to indicate whether this allegrosql was executed as a prepared memex. 1837 SlowLogPrepared = "Prepared" 1838 // SlowLogCausetFromCache is used to indicate whether this plan is from plan cache. 1839 SlowLogCausetFromCache = "Causet_from_cache" 1840 // SlowLogHasMoreResults is used to indicate whether this allegrosql has more results to follow. 1841 SlowLogHasMoreResults = "Has_more_results" 1842 // SlowLogSucc is used to indicate whether this allegrosql executed successfully. 1843 SlowLogSucc = "Succ" 1844 // SlowLogPrevStmt is used to show the previously executed memex. 1845 SlowLogPrevStmt = "Prev_stmt" 1846 // SlowLogCauset is used to record the query plan. 1847 SlowLogCauset = "Causet" 1848 // SlowLogCausetDigest is used to record the query plan digest. 1849 SlowLogCausetDigest = "Causet_digest" 1850 // SlowLogCausetPrefix is the prefix of the plan value. 1851 SlowLogCausetPrefix = ast.MilevaDBDecodeCauset + "('" 1852 // SlowLogCausetSuffix is the suffix of the plan value. 1853 SlowLogCausetSuffix = "')" 1854 // SlowLogPrevStmtPrefix is the prefix of Prev_stmt in slow log file.
1855 SlowLogPrevStmtPrefix = SlowLogPrevStmt + SlowLogSpaceMarkStr 1856 // SlowLogKVTotal is the total time waiting for ekv. 1857 SlowLogKVTotal = "KV_total" 1858 // SlowLogFIDelTotal is the total time waiting for fidel. 1859 SlowLogFIDelTotal = "FIDel_total" 1860 // SlowLogBackoffTotal is the total time doing backoff. 1861 SlowLogBackoffTotal = "Backoff_total" 1862 // SlowLogWriteALLEGROSQLRespTotal is the total time used to write response to client. 1863 SlowLogWriteALLEGROSQLRespTotal = "Write_sql_response_total" 1864 // SlowLogInterDircRetryCount is the execution retry count. 1865 SlowLogInterDircRetryCount = "InterDirc_retry_count" 1866 // SlowLogInterDircRetryTime is the execution retry time. 1867 SlowLogInterDircRetryTime = "InterDirc_retry_time" 1868 ) 1869 1870 // SlowQueryLogItems is a defCauslection of items that should be included in the 1871 // slow query log. 1872 type SlowQueryLogItems struct { 1873 TxnTS uint64 1874 ALLEGROALLEGROSQL string 1875 Digest string 1876 TimeTotal time.Duration 1877 TimeParse time.Duration 1878 TimeCompile time.Duration 1879 TimeOptimize time.Duration 1880 TimeWaitTS time.Duration 1881 IndexNames string 1882 StatsInfos map[string]uint64 1883 CausetTasks *stmtctx.CausetTasksDetails 1884 InterDircDetail execdetails.InterDircDetails 1885 MemMax int64 1886 DiskMax int64 1887 Succ bool 1888 Prepared bool 1889 CausetFromCache bool 1890 HasMoreResults bool 1891 PrevStmt string 1892 Causet string 1893 CausetDigest string 1894 RewriteInfo RewritePhaseInfo 1895 KVTotal time.Duration 1896 FIDelTotal time.Duration 1897 BackoffTotal time.Duration 1898 WriteALLEGROSQLRespTotal time.Duration 1899 InterDircRetryCount uint 1900 InterDircRetryTime time.Duration 1901 } 1902 1903 // SlowLogFormat uses for formatting slow log. 
1904 // The slow log output is like below: 1905 // # Time: 2020-04-28T15:24:04.309074+08:00 1906 // # Txn_start_ts: 406315658548871171 1907 // # User@Host: root[root] @ localhost [127.0.0.1] 1908 // # Conn_ID: 6 1909 // # Query_time: 4.895492 1910 // # Process_time: 0.161 Request_count: 1 Total_keys: 100001 Processed_keys: 100000 1911 // # EDB: test 1912 // # Index_names: [t1.idx1,t2.idx2] 1913 // # Is_internal: false 1914 // # Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772 1915 // # Stats: t1:1,t2:2 1916 // # Num_cop_tasks: 10 1917 // # Cop_process: Avg_time: 1s P90_time: 2s Max_time: 3s Max_addr: 10.6.131.78 1918 // # Cop_wait: Avg_time: 10ms P90_time: 20ms Max_time: 30ms Max_Addr: 10.6.131.79 1919 // # Memory_max: 4096 1920 // # Disk_max: 65535 1921 // # Succ: true 1922 // # Prev_stmt: begin; 1923 // select * from t_slim; 1924 func (s *StochastikVars) SlowLogFormat(logItems *SlowQueryLogItems) string { 1925 var buf bytes.Buffer 1926 1927 writeSlowLogItem(&buf, SlowLogTxnStartTSStr, strconv.FormatUint(logItems.TxnTS, 10)) 1928 if s.User != nil { 1929 hostAddress := s.User.Hostname 1930 if s.ConnectionInfo != nil { 1931 hostAddress = s.ConnectionInfo.ClientIP 1932 } 1933 writeSlowLogItem(&buf, SlowLogUserAndHostStr, fmt.Sprintf("%s[%s] @ %s [%s]", s.User.Username, s.User.Username, s.User.Hostname, hostAddress)) 1934 } 1935 if s.ConnectionID != 0 { 1936 writeSlowLogItem(&buf, SlowLogConnIDStr, strconv.FormatUint(s.ConnectionID, 10)) 1937 } 1938 if logItems.InterDircRetryCount > 0 { 1939 buf.WriteString(SlowLogRowPrefixStr) 1940 buf.WriteString(SlowLogInterDircRetryTime) 1941 buf.WriteString(SlowLogSpaceMarkStr) 1942 buf.WriteString(strconv.FormatFloat(logItems.InterDircRetryTime.Seconds(), 'f', -1, 64)) 1943 buf.WriteString(" ") 1944 buf.WriteString(SlowLogInterDircRetryCount) 1945 buf.WriteString(SlowLogSpaceMarkStr) 1946 buf.WriteString(strconv.Itoa(int(logItems.InterDircRetryCount))) 1947 buf.WriteString("\n") 1948 } 1949 writeSlowLogItem(&buf, SlowLogQueryTimeStr, strconv.FormatFloat(logItems.TimeTotal.Seconds(), 'f', -1, 64)) 1950 writeSlowLogItem(&buf, SlowLogParseTimeStr, strconv.FormatFloat(logItems.TimeParse.Seconds(), 'f', -1, 64)) 1951 writeSlowLogItem(&buf, SlowLogCompileTimeStr, strconv.FormatFloat(logItems.TimeCompile.Seconds(), 'f', -1, 64)) 1952 1953 buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v", SlowLogRewriteTimeStr, 1954 SlowLogSpaceMarkStr, strconv.FormatFloat(logItems.RewriteInfo.DurationRewrite.Seconds(), 'f', -1, 64))) 1955 if logItems.RewriteInfo.PreprocessSubQueries > 0 { 1956 buf.WriteString(fmt.Sprintf(" %v%v%v %v%v%v", SlowLogPreprocSubQueriesStr, SlowLogSpaceMarkStr, logItems.RewriteInfo.PreprocessSubQueries, 1957 SlowLogPreProcSubQueryTimeStr, SlowLogSpaceMarkStr, strconv.FormatFloat(logItems.RewriteInfo.DurationPreprocessSubQuery.Seconds(), 'f', -1, 64))) 1958 } 1959 buf.WriteString("\n") 1960 1961 writeSlowLogItem(&buf, SlowLogOptimizeTimeStr, strconv.FormatFloat(logItems.TimeOptimize.Seconds(), 'f', -1, 64)) 1962 writeSlowLogItem(&buf, SlowLogWaitTSTimeStr, strconv.FormatFloat(logItems.TimeWaitTS.Seconds(), 'f', -1, 64)) 1963 1964 if execDetailStr := logItems.InterDircDetail.String(); len(execDetailStr) > 0 { 1965 buf.WriteString(SlowLogRowPrefixStr + execDetailStr + "\n") 1966 } 1967 1968 if len(s.CurrentDB) > 0 { 1969 writeSlowLogItem(&buf, SlowLogDBStr, s.CurrentDB) 1970 } 1971 if len(logItems.IndexNames) > 0 { 1972 writeSlowLogItem(&buf, SlowLogIndexNamesStr, logItems.IndexNames) 1973 } 1974 1975 
writeSlowLogItem(&buf, SlowLogIsInternalStr, strconv.FormatBool(s.InRestrictedALLEGROSQL)) 1976 if len(logItems.Digest) > 0 { 1977 writeSlowLogItem(&buf, SlowLogDigestStr, logItems.Digest) 1978 } 1979 if len(logItems.StatsInfos) > 0 { 1980 buf.WriteString(SlowLogRowPrefixStr + SlowLogStatsInfoStr + SlowLogSpaceMarkStr) 1981 firstComma := false 1982 vStr := "" 1983 for k, v := range logItems.StatsInfos { 1984 if v == 0 { 1985 vStr = "pseudo" 1986 } else { 1987 vStr = strconv.FormatUint(v, 10) 1988 1989 } 1990 if firstComma { 1991 buf.WriteString("," + k + ":" + vStr) 1992 } else { 1993 buf.WriteString(k + ":" + vStr) 1994 firstComma = true 1995 } 1996 } 1997 buf.WriteString("\n") 1998 } 1999 if logItems.CausetTasks != nil { 2000 writeSlowLogItem(&buf, SlowLogNumCausetTasksStr, strconv.FormatInt(int64(logItems.CausetTasks.NumCausetTasks), 10)) 2001 if logItems.CausetTasks.NumCausetTasks > 0 { 2002 // make the result sblock 2003 backoffs := make([]string, 0, 3) 2004 for backoff := range logItems.CausetTasks.TotBackoffTimes { 2005 backoffs = append(backoffs, backoff) 2006 } 2007 sort.Strings(backoffs) 2008 2009 if logItems.CausetTasks.NumCausetTasks == 1 { 2010 buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v", 2011 SlowLogCopProcAvg, SlowLogSpaceMarkStr, logItems.CausetTasks.AvgProcessTime.Seconds(), 2012 SlowLogCopProcAddr, SlowLogSpaceMarkStr, logItems.CausetTasks.MaxProcessAddress) + "\n") 2013 buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v", 2014 SlowLogCopWaitAvg, SlowLogSpaceMarkStr, logItems.CausetTasks.AvgWaitTime.Seconds(), 2015 SlowLogCopWaitAddr, SlowLogSpaceMarkStr, logItems.CausetTasks.MaxWaitAddress) + "\n") 2016 for _, backoff := range backoffs { 2017 backoffPrefix := SlowLogCopBackoffPrefix + backoff + "_" 2018 buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v\n", 2019 backoffPrefix+"total_times", SlowLogSpaceMarkStr, logItems.CausetTasks.TotBackoffTimes[backoff], 2020 backoffPrefix+"total_time", SlowLogSpaceMarkStr, logItems.CausetTasks.TotBackoffTime[backoff].Seconds(), 2021 )) 2022 } 2023 } else { 2024 buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v %v%v%v %v%v%v", 2025 SlowLogCopProcAvg, SlowLogSpaceMarkStr, logItems.CausetTasks.AvgProcessTime.Seconds(), 2026 SlowLogCopProcP90, SlowLogSpaceMarkStr, logItems.CausetTasks.P90ProcessTime.Seconds(), 2027 SlowLogCopProcMax, SlowLogSpaceMarkStr, logItems.CausetTasks.MaxProcessTime.Seconds(), 2028 SlowLogCopProcAddr, SlowLogSpaceMarkStr, logItems.CausetTasks.MaxProcessAddress) + "\n") 2029 buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v %v%v%v %v%v%v", 2030 SlowLogCopWaitAvg, SlowLogSpaceMarkStr, logItems.CausetTasks.AvgWaitTime.Seconds(), 2031 SlowLogCopWaitP90, SlowLogSpaceMarkStr, logItems.CausetTasks.P90WaitTime.Seconds(), 2032 SlowLogCopWaitMax, SlowLogSpaceMarkStr, logItems.CausetTasks.MaxWaitTime.Seconds(), 2033 SlowLogCopWaitAddr, SlowLogSpaceMarkStr, logItems.CausetTasks.MaxWaitAddress) + "\n") 2034 for _, backoff := range backoffs { 2035 backoffPrefix := SlowLogCopBackoffPrefix + backoff + "_" 2036 buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v %v%v%v %v%v%v %v%v%v %v%v%v\n", 2037 backoffPrefix+"total_times", SlowLogSpaceMarkStr, logItems.CausetTasks.TotBackoffTimes[backoff], 2038 backoffPrefix+"total_time", SlowLogSpaceMarkStr, logItems.CausetTasks.TotBackoffTime[backoff].Seconds(), 2039 backoffPrefix+"max_time", SlowLogSpaceMarkStr, logItems.CausetTasks.MaxBackoffTime[backoff].Seconds(), 2040 backoffPrefix+"max_addr", 
SlowLogSpaceMarkStr, logItems.CausetTasks.MaxBackoffAddress[backoff], 2041 backoffPrefix+"avg_time", SlowLogSpaceMarkStr, logItems.CausetTasks.AvgBackoffTime[backoff].Seconds(), 2042 backoffPrefix+"p90_time", SlowLogSpaceMarkStr, logItems.CausetTasks.P90BackoffTime[backoff].Seconds(), 2043 )) 2044 } 2045 } 2046 } 2047 } 2048 if logItems.MemMax > 0 { 2049 writeSlowLogItem(&buf, SlowLogMemMax, strconv.FormatInt(logItems.MemMax, 10)) 2050 } 2051 if logItems.DiskMax > 0 { 2052 writeSlowLogItem(&buf, SlowLogDiskMax, strconv.FormatInt(logItems.DiskMax, 10)) 2053 } 2054 2055 writeSlowLogItem(&buf, SlowLogPrepared, strconv.FormatBool(logItems.Prepared)) 2056 writeSlowLogItem(&buf, SlowLogCausetFromCache, strconv.FormatBool(logItems.CausetFromCache)) 2057 writeSlowLogItem(&buf, SlowLogHasMoreResults, strconv.FormatBool(logItems.HasMoreResults)) 2058 writeSlowLogItem(&buf, SlowLogKVTotal, strconv.FormatFloat(logItems.KVTotal.Seconds(), 'f', -1, 64)) 2059 writeSlowLogItem(&buf, SlowLogFIDelTotal, strconv.FormatFloat(logItems.FIDelTotal.Seconds(), 'f', -1, 64)) 2060 writeSlowLogItem(&buf, SlowLogBackoffTotal, strconv.FormatFloat(logItems.BackoffTotal.Seconds(), 'f', -1, 64)) 2061 writeSlowLogItem(&buf, SlowLogWriteALLEGROSQLRespTotal, strconv.FormatFloat(logItems.WriteALLEGROSQLRespTotal.Seconds(), 'f', -1, 64)) 2062 writeSlowLogItem(&buf, SlowLogSucc, strconv.FormatBool(logItems.Succ)) 2063 if len(logItems.Causet) != 0 { 2064 writeSlowLogItem(&buf, SlowLogCauset, logItems.Causet) 2065 } 2066 if len(logItems.CausetDigest) != 0 { 2067 writeSlowLogItem(&buf, SlowLogCausetDigest, logItems.CausetDigest) 2068 } 2069 2070 if logItems.PrevStmt != "" { 2071 writeSlowLogItem(&buf, SlowLogPrevStmt, logItems.PrevStmt) 2072 } 2073 2074 if s.CurrentDBChanged { 2075 buf.WriteString(fmt.Sprintf("use %s;\n", s.CurrentDB)) 2076 s.CurrentDBChanged = false 2077 } 2078 2079 buf.WriteString(logItems.ALLEGROALLEGROSQL) 2080 if len(logItems.ALLEGROALLEGROSQL) == 0 || logItems.ALLEGROALLEGROSQL[len(logItems.ALLEGROALLEGROSQL)-1] != ';' { 2081 buf.WriteString(";") 2082 } 2083 return buf.String() 2084 } 2085 2086 // writeSlowLogItem writes a slow log item in the form of: "# ${key}:${value}" 2087 func writeSlowLogItem(buf *bytes.Buffer, key, value string) { 2088 buf.WriteString(SlowLogRowPrefixStr + key + SlowLogSpaceMarkStr + value + "\n") 2089 }
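
// The function below is a minimal, illustrative sketch of how two pieces of this
// file fit together: the ConcurrencyUnset fallback in Concurrency, and slow-log
// rendering via SlowLogFormat. The function name is hypothetical and the literal
// values (worker counts, timestamp, durations, query text) are arbitrary examples,
// not defaults; it also assumes a zero-value StochastikVars is sufficient here,
// since SlowLogFormat only reads optional fields such as User, ConnectionID and
// CurrentDB, all of which may be empty.
func exampleConcurrencyAndSlowLogUsage() {
	// Per-interlock concurrency values fall back to the shared
	// InterlockingDirectorateConcurrency while they stay at ConcurrencyUnset.
	var c Concurrency
	c.InterlockingDirectorateConcurrency = 8
	c.SetIndexLookupConcurrency(ConcurrencyUnset) // unset: IndexLookupConcurrency() returns 8
	c.SetHashJoinConcurrency(4)                   // explicitly set: HashJoinConcurrency() returns 4
	fmt.Println(c.IndexLookupConcurrency(), c.HashJoinConcurrency())

	// Render a slow-log entry from a SlowQueryLogItems value; optional items
	// such as Digest, Stats and the cop-task details are omitted when empty.
	items := &SlowQueryLogItems{
		TxnTS:             406315658548871171,
		ALLEGROALLEGROSQL: "select * from t_slim",
		TimeTotal:         2 * time.Second,
		TimeParse:         3 * time.Millisecond,
		TimeCompile:       5 * time.Millisecond,
		Succ:              true,
	}
	s := &StochastikVars{}
	fmt.Println(s.SlowLogFormat(items))
}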