github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/causetstore/stochastikctx/stmtctx/stmtctx.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package stmtctx

import (
	"math"
	"sort"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/whtcorpsinc/BerolinaSQL"
	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/milevadb/soliton/disk"
	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
	"github.com/whtcorpsinc/milevadb/soliton/memory"
	"go.uber.org/zap"
)

const (
	// WarnLevelError represents level "Error" for 'SHOW WARNINGS' syntax.
	WarnLevelError = "Error"
	// WarnLevelWarning represents level "Warning" for 'SHOW WARNINGS' syntax.
	WarnLevelWarning = "Warning"
	// WarnLevelNote represents level "Note" for 'SHOW WARNINGS' syntax.
	WarnLevelNote = "Note"
)

var taskIDAlloc uint64

// AllocateTaskID allocates a new unique ID for a memex execution.
func AllocateTaskID() uint64 {
	return atomic.AddUint64(&taskIDAlloc, 1)
}

// ALLEGROSQLWarn relates an allegrosql warning and its level.
type ALLEGROSQLWarn struct {
	Level string
	Err   error
}
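// A minimal sketch of allocating a task ID and building a warning; the
// errors.New text below is only a hypothetical placeholder:
//
//	id := AllocateTaskID() // strictly increasing across concurrent callers
//	warn := ALLEGROSQLWarn{Level: WarnLevelWarning, Err: errors.New("example warning")}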
// StatementContext contains variables for a memex.
// It should be reset before executing a memex.
type StatementContext struct {
	// Set the following variables before execution
	StmtHints

	// IsDBSJobInQueue is used to mark whether the DBS job is put into the queue.
	// If IsDBSJobInQueue is true, it means the DBS job is in the queue of storage, and it can be handled by the DBS worker.
	IsDBSJobInQueue        bool
	InInsertStmt           bool
	InUFIDelateStmt        bool
	InDeleteStmt           bool
	InSelectStmt           bool
	InLoadDataStmt         bool
	InExplainStmt          bool
	IgnoreTruncate         bool
	IgnoreZeroInDate       bool
	DupKeyAsWarning        bool
	BadNullAsWarning       bool
	DividedByZeroAsWarning bool
	TruncateAsWarning      bool
	OverflowAsWarning      bool
	InShowWarning          bool
	UseCache               bool
	BatchCheck             bool
	InNullRejectCheck      bool
	AllowInvalidDate       bool

	// mu struct holds variables that change during execution.
	mu struct {
		sync.Mutex

		affectedRows uint64
		foundRows    uint64

		/*
			following variables are ported from 'COPY_INFO' struct of MyALLEGROSQL server source,
			they are used to count rows for INSERT/REPLACE/UFIDelATE queries:
			If an event is inserted then the copied variable is incremented.
			If an event is uFIDelated by the INSERT ... ON DUPLICATE KEY UFIDelATE and the
			new data differs from the old one then the copied and the uFIDelated
			variables are incremented.
			The touched variable is incremented if an event was touched by the uFIDelate part
			of the INSERT ... ON DUPLICATE KEY UFIDelATE no matter whether the event
			was actually changed or not.

			see https://github.com/allegrosql/allegrosql-server/blob/d2029238d6d9f648077664e4cdd611e231a6dc14/allegrosql/sql_data_change.h#L60 for more details
		*/
		records    uint64
		uFIDelated uint64
		copied     uint64
		touched    uint64

		message             string
		warnings            []ALLEGROSQLWarn
		errorCount          uint16
		histogramsNotLoad   bool
		execDetails         execdetails.InterDircDetails
		allInterDircDetails []*execdetails.InterDircDetails
	}
	// PrevAffectedRows is the affected-rows value (DBS is 0, DML is the number of affected rows).
	PrevAffectedRows int64
	// PrevLastInsertID is the last insert ID of the previous memex.
	PrevLastInsertID uint64
	// LastInsertID is the auto-generated ID in the current memex.
	LastInsertID uint64
	// InsertID is the given insert ID of an auto_increment defCausumn.
	InsertID uint64

	BaseRowID int64
	MaxRowID  int64

	// Copied from StochastikVars.TimeZone.
	TimeZone             *time.Location
	Priority             allegrosql.PriorityEnum
	NotFillCache         bool
	MemTracker           *memory.Tracker
	DiskTracker          *disk.Tracker
	RuntimeStatsDefCausl *execdetails.RuntimeStatsDefCausl
	BlockIDs             []int64
	IndexNames           []string
	nowTs                time.Time // use this variable for now/current_timestamp calculation/cache for one stmt
	stmtTimeCached       bool
	StmtType             string
	OriginalALLEGROSQL   string
	digestMemo           struct {
		sync.Once
		normalized string
		digest     string
	}
	// planNormalized is used to cache the normalized plan and avoid duplicate builds.
	planNormalized        string
	planDigest            string
	Blocks                []BlockEntry
	PointInterDirc        bool  // for point uFIDelate cached execution, Constant memex need to set "paramMarker"
	lockWaitStartTime     int64 // LockWaitStartTime stores the pessimistic dagger wait start time
	PessimisticLockWaited int32
	LockKeysDuration      int64
	LockKeysCount         int32
	TblInfo2UnionScan     map[*perceptron.BlockInfo]bool
	TaskID                uint64 // unique ID for an execution of a memex
	TaskMapBakTS          uint64 // counter for
}

// StmtHints are StochastikVars related allegrosql hints.
type StmtHints struct {
	// Hint Information
	MemQuotaQuery                int64
	ApplyCacheCapacity           int64
	MaxInterDircutionTime        uint64
	ReplicaRead                  byte
	AllowInSubqToJoinAnPosetDagg bool
	NoIndexMergeHint             bool
	// EnableCascadesCausetAppend indicates whether to use the cascades causet for a single query only.
	EnableCascadesCausetAppend bool
	// ForceNthCauset indicates the CausetCounterTp number for finding the physical plan.
	// -1 means disabled.
	ForceNthCauset int64

	// Hint flags
	HasAllowInSubqToJoinAnPosetDaggHint bool
	HasMemQuotaHint                     bool
	HasReplicaReadHint                  bool
	HasMaxInterDircutionTime            bool
	HasEnableCascadesCausetAppendHint   bool
}

// TaskMapNeedBackUp indicates whether we need to back up taskMap during physical optimization.
func (sh *StmtHints) TaskMapNeedBackUp() bool {
	return sh.ForceNthCauset != -1
}

// GetNowTsCached returns the cached nowTs; if it is not cached yet, it reads the current time and caches it.
func (sc *StatementContext) GetNowTsCached() time.Time {
	if !sc.stmtTimeCached {
		now := time.Now()
		sc.nowTs = now
		sc.stmtTimeCached = true
	}
	return sc.nowTs
}

// ResetNowTs clears the cached-time flag so that nowTs is refreshed on the next call.
func (sc *StatementContext) ResetNowTs() {
	sc.stmtTimeCached = false
}
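// Sketch of the per-memex timestamp cache, assuming a fresh StatementContext:
//
//	sc := &StatementContext{}
//	t1 := sc.GetNowTsCached() // reads the clock once and caches it
//	t2 := sc.GetNowTsCached() // t2 == t1, served from the cache
//	sc.ResetNowTs()           // the next GetNowTsCached call re-reads the clock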
// ALLEGROSQLDigest returns the normalized form and digest of the provided allegrosql.
// It caches the result after the first call.
func (sc *StatementContext) ALLEGROSQLDigest() (normalized, sqlDigest string) {
	sc.digestMemo.Do(func() {
		sc.digestMemo.normalized, sc.digestMemo.digest = BerolinaSQL.NormalizeDigest(sc.OriginalALLEGROSQL)
	})
	return sc.digestMemo.normalized, sc.digestMemo.digest
}

// InitALLEGROSQLDigest sets the normalized form and digest for the allegrosql.
func (sc *StatementContext) InitALLEGROSQLDigest(normalized, digest string) {
	sc.digestMemo.Do(func() {
		sc.digestMemo.normalized, sc.digestMemo.digest = normalized, digest
	})
}

// GetCausetDigest gets the normalized plan and plan digest.
func (sc *StatementContext) GetCausetDigest() (normalized, planDigest string) {
	return sc.planNormalized, sc.planDigest
}

// SetCausetDigest sets the normalized plan and plan digest.
func (sc *StatementContext) SetCausetDigest(normalized, planDigest string) {
	sc.planNormalized, sc.planDigest = normalized, planDigest
}

// BlockEntry presents a causet in EDB.
type BlockEntry struct {
	EDB   string
	Block string
}

// AddAffectedRows adds affected rows.
func (sc *StatementContext) AddAffectedRows(rows uint64) {
	sc.mu.Lock()
	sc.mu.affectedRows += rows
	sc.mu.Unlock()
}

// AffectedRows gets affected rows.
func (sc *StatementContext) AffectedRows() uint64 {
	sc.mu.Lock()
	rows := sc.mu.affectedRows
	sc.mu.Unlock()
	return rows
}

// FoundRows gets found rows.
func (sc *StatementContext) FoundRows() uint64 {
	sc.mu.Lock()
	rows := sc.mu.foundRows
	sc.mu.Unlock()
	return rows
}

// AddFoundRows adds found rows.
func (sc *StatementContext) AddFoundRows(rows uint64) {
	sc.mu.Lock()
	sc.mu.foundRows += rows
	sc.mu.Unlock()
}

// RecordRows is used to generate the info message.
func (sc *StatementContext) RecordRows() uint64 {
	sc.mu.Lock()
	rows := sc.mu.records
	sc.mu.Unlock()
	return rows
}

// AddRecordRows adds record rows.
func (sc *StatementContext) AddRecordRows(rows uint64) {
	sc.mu.Lock()
	sc.mu.records += rows
	sc.mu.Unlock()
}

// UFIDelatedRows is used to generate the info message.
func (sc *StatementContext) UFIDelatedRows() uint64 {
	sc.mu.Lock()
	rows := sc.mu.uFIDelated
	sc.mu.Unlock()
	return rows
}

// AddUFIDelatedRows adds uFIDelated rows.
func (sc *StatementContext) AddUFIDelatedRows(rows uint64) {
	sc.mu.Lock()
	sc.mu.uFIDelated += rows
	sc.mu.Unlock()
}

// CopiedRows is used to generate the info message.
func (sc *StatementContext) CopiedRows() uint64 {
	sc.mu.Lock()
	rows := sc.mu.copied
	sc.mu.Unlock()
	return rows
}

// AddCopiedRows adds copied rows.
func (sc *StatementContext) AddCopiedRows(rows uint64) {
	sc.mu.Lock()
	sc.mu.copied += rows
	sc.mu.Unlock()
}

// TouchedRows is used to generate the info message.
func (sc *StatementContext) TouchedRows() uint64 {
	sc.mu.Lock()
	rows := sc.mu.touched
	sc.mu.Unlock()
	return rows
}
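// The digest memo is guarded by sync.Once, so whichever of ALLEGROSQLDigest or
// InitALLEGROSQLDigest runs first fixes the result, and later calls cannot
// overwrite it. Sketch (the query text and digest values are hypothetical):
//
//	sc := &StatementContext{OriginalALLEGROSQL: "select * from t where a = 1"}
//	norm, digest := sc.ALLEGROSQLDigest()         // computes and caches
//	sc.InitALLEGROSQLDigest("other", "otherhash") // no-op: memo already set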
// AddTouchedRows adds touched rows.
func (sc *StatementContext) AddTouchedRows(rows uint64) {
	sc.mu.Lock()
	sc.mu.touched += rows
	sc.mu.Unlock()
}

// GetMessage returns the extra message of the last executed command.
// If there is no message, it returns an empty string.
func (sc *StatementContext) GetMessage() string {
	sc.mu.Lock()
	msg := sc.mu.message
	sc.mu.Unlock()
	return msg
}

// SetMessage sets the info message generated by some commands.
func (sc *StatementContext) SetMessage(msg string) {
	sc.mu.Lock()
	sc.mu.message = msg
	sc.mu.Unlock()
}

// GetWarnings gets warnings.
func (sc *StatementContext) GetWarnings() []ALLEGROSQLWarn {
	sc.mu.Lock()
	warns := make([]ALLEGROSQLWarn, len(sc.mu.warnings))
	copy(warns, sc.mu.warnings)
	sc.mu.Unlock()
	return warns
}

// TruncateWarnings truncates warnings beginning from start and returns the truncated warnings.
func (sc *StatementContext) TruncateWarnings(start int) []ALLEGROSQLWarn {
	sc.mu.Lock()
	defer sc.mu.Unlock()
	sz := len(sc.mu.warnings) - start
	if sz <= 0 {
		return nil
	}
	ret := make([]ALLEGROSQLWarn, sz)
	copy(ret, sc.mu.warnings[start:])
	sc.mu.warnings = sc.mu.warnings[:start]
	return ret
}

// WarningCount gets the warning count.
func (sc *StatementContext) WarningCount() uint16 {
	if sc.InShowWarning {
		return 0
	}
	sc.mu.Lock()
	wc := uint16(len(sc.mu.warnings))
	sc.mu.Unlock()
	return wc
}

// NumErrorWarnings gets the warning and error count.
func (sc *StatementContext) NumErrorWarnings() (ec uint16, wc int) {
	sc.mu.Lock()
	ec = sc.mu.errorCount
	wc = len(sc.mu.warnings)
	sc.mu.Unlock()
	return
}

// SetWarnings sets warnings.
func (sc *StatementContext) SetWarnings(warns []ALLEGROSQLWarn) {
	sc.mu.Lock()
	sc.mu.warnings = warns
	for _, w := range warns {
		if w.Level == WarnLevelError {
			sc.mu.errorCount++
		}
	}
	sc.mu.Unlock()
}

// AppendWarning appends a warning with level 'Warning'.
func (sc *StatementContext) AppendWarning(warn error) {
	sc.mu.Lock()
	if len(sc.mu.warnings) < math.MaxUint16 {
		sc.mu.warnings = append(sc.mu.warnings, ALLEGROSQLWarn{WarnLevelWarning, warn})
	}
	sc.mu.Unlock()
}

// AppendWarnings appends some warnings.
func (sc *StatementContext) AppendWarnings(warns []ALLEGROSQLWarn) {
	sc.mu.Lock()
	if len(sc.mu.warnings) < math.MaxUint16 {
		sc.mu.warnings = append(sc.mu.warnings, warns...)
	}
	sc.mu.Unlock()
}

// AppendNote appends a warning with level 'Note'.
func (sc *StatementContext) AppendNote(warn error) {
	sc.mu.Lock()
	if len(sc.mu.warnings) < math.MaxUint16 {
		sc.mu.warnings = append(sc.mu.warnings, ALLEGROSQLWarn{WarnLevelNote, warn})
	}
	sc.mu.Unlock()
}

// AppendError appends a warning with level 'Error'.
func (sc *StatementContext) AppendError(warn error) {
	sc.mu.Lock()
	if len(sc.mu.warnings) < math.MaxUint16 {
		sc.mu.warnings = append(sc.mu.warnings, ALLEGROSQLWarn{WarnLevelError, warn})
		sc.mu.errorCount++
	}
	sc.mu.Unlock()
}

// SetHistogramsNotLoad sets histogramsNotLoad.
func (sc *StatementContext) SetHistogramsNotLoad() {
	sc.mu.Lock()
	sc.mu.histogramsNotLoad = true
	sc.mu.Unlock()
}
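// Warning bookkeeping sketch, assuming a fresh StatementContext and
// placeholder errors:
//
//	sc := &StatementContext{}
//	sc.AppendWarning(errors.New("w")) // level Warning
//	sc.AppendError(errors.New("e"))   // level Error, also bumps errorCount
//	wc := sc.WarningCount()           // 2, or 0 while InShowWarning is set
//	tail := sc.TruncateWarnings(1)    // removes and returns warnings[1:], here the Error entry
//	ec, n := sc.NumErrorWarnings()    // ec == 1, n == 1 after the truncation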
// HandleTruncate ignores or returns the error based on the StatementContext state.
func (sc *StatementContext) HandleTruncate(err error) error {
	// TODO: At present we have not checked whether the error can be ignored or treated as warning.
	// We will do that later, and then append WarnDataTruncated instead of the error itself.
	if err == nil {
		return nil
	}
	if sc.IgnoreTruncate {
		return nil
	}
	if sc.TruncateAsWarning {
		sc.AppendWarning(err)
		return nil
	}
	return err
}

// HandleOverflow treats ErrOverflow as warnings or returns the error based on the StmtCtx.OverflowAsWarning state.
func (sc *StatementContext) HandleOverflow(err error, warnErr error) error {
	if err == nil {
		return nil
	}

	if sc.OverflowAsWarning {
		sc.AppendWarning(warnErr)
		return nil
	}
	return err
}

// ResetForRetry resets the changed states during execution.
func (sc *StatementContext) ResetForRetry() {
	sc.mu.Lock()
	sc.mu.affectedRows = 0
	sc.mu.foundRows = 0
	sc.mu.records = 0
	sc.mu.uFIDelated = 0
	sc.mu.copied = 0
	sc.mu.touched = 0
	sc.mu.message = ""
	sc.mu.errorCount = 0
	sc.mu.warnings = nil
	sc.mu.execDetails = execdetails.InterDircDetails{}
	sc.mu.allInterDircDetails = make([]*execdetails.InterDircDetails, 0, 4)
	sc.mu.Unlock()
	sc.MaxRowID = 0
	sc.BaseRowID = 0
	sc.BlockIDs = sc.BlockIDs[:0]
	sc.IndexNames = sc.IndexNames[:0]
	sc.TaskID = AllocateTaskID()
}

// MergeInterDircDetails merges a single region execution details into self, used to print
// the information in slow query log.
func (sc *StatementContext) MergeInterDircDetails(details *execdetails.InterDircDetails, commitDetails *execdetails.CommitDetails) {
	sc.mu.Lock()
	if details != nil {
		sc.mu.execDetails.CopTime += details.CopTime
		sc.mu.execDetails.ProcessTime += details.ProcessTime
		sc.mu.execDetails.WaitTime += details.WaitTime
		sc.mu.execDetails.BackoffTime += details.BackoffTime
		sc.mu.execDetails.RequestCount++
		sc.mu.execDetails.TotalKeys += details.TotalKeys
		sc.mu.execDetails.ProcessedKeys += details.ProcessedKeys
		sc.mu.allInterDircDetails = append(sc.mu.allInterDircDetails, details)
	}
	sc.mu.execDetails.CommitDetail = commitDetails
	sc.mu.Unlock()
}

// MergeLockKeysInterDircDetails merges dagger keys execution details into self.
func (sc *StatementContext) MergeLockKeysInterDircDetails(lockKeys *execdetails.LockKeysDetails) {
	sc.mu.Lock()
	if sc.mu.execDetails.LockKeysDetail == nil {
		sc.mu.execDetails.LockKeysDetail = lockKeys
	} else {
		sc.mu.execDetails.LockKeysDetail.Merge(lockKeys)
	}
	sc.mu.Unlock()
}

// GetInterDircDetails gets the execution details for the memex.
func (sc *StatementContext) GetInterDircDetails() execdetails.InterDircDetails {
	var details execdetails.InterDircDetails
	sc.mu.Lock()
	details = sc.mu.execDetails
	details.LockKeysDuration = time.Duration(atomic.LoadInt64(&sc.LockKeysDuration))
	sc.mu.Unlock()
	return details
}
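// HandleTruncate decision sketch: with IgnoreTruncate unset and
// TruncateAsWarning set, a truncation error is downgraded to a warning
// (err stands for a hypothetical truncation error):
//
//	sc.IgnoreTruncate = false
//	sc.TruncateAsWarning = true
//	if e := sc.HandleTruncate(err); e == nil {
//	    // err was appended via AppendWarning instead of failing the memex
//	}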
// ShouldClipToZero indicates whether values less than 0 should be clipped to 0 for unsigned integer types.
// This is the case for `insert`, `uFIDelate`, `alter causet` and `load data infile` memexs, when not in strict ALLEGROSQL mode.
// see https://dev.allegrosql.com/doc/refman/5.7/en/out-of-range-and-overflow.html
func (sc *StatementContext) ShouldClipToZero() bool {
	// TODO: Currently altering a defCausumn of integer to unsigned integer is not supported.
	// If it is supported one day, that case should be added here.
	return sc.InInsertStmt || sc.InLoadDataStmt || sc.InUFIDelateStmt
}

// ShouldIgnoreOverflowError indicates whether we should ignore the error when type conversion overflows,
// so we can leave it for further processing like clipping values less than 0 to 0 for unsigned integer types.
func (sc *StatementContext) ShouldIgnoreOverflowError() bool {
	if (sc.InInsertStmt && sc.TruncateAsWarning) || sc.InLoadDataStmt {
		return true
	}
	return false
}

// PushDownFlags converts StatementContext to fidelpb.SelectRequest.Flags.
func (sc *StatementContext) PushDownFlags() uint64 {
	var flags uint64
	if sc.InInsertStmt {
		flags |= perceptron.FlagInInsertStmt
	} else if sc.InUFIDelateStmt || sc.InDeleteStmt {
		flags |= perceptron.FlagInUFIDelateOrDeleteStmt
	} else if sc.InSelectStmt {
		flags |= perceptron.FlagInSelectStmt
	}
	if sc.IgnoreTruncate {
		flags |= perceptron.FlagIgnoreTruncate
	} else if sc.TruncateAsWarning {
		flags |= perceptron.FlagTruncateAsWarning
	}
	if sc.OverflowAsWarning {
		flags |= perceptron.FlagOverflowAsWarning
	}
	if sc.IgnoreZeroInDate {
		flags |= perceptron.FlagIgnoreZeroInDate
	}
	if sc.DividedByZeroAsWarning {
		flags |= perceptron.FlagDividedByZeroAsWarning
	}
	if sc.InLoadDataStmt {
		flags |= perceptron.FlagInLoadDataStmt
	}
	return flags
}
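// PushDownFlags packs the memex state into a bit mask for coprocessor
// requests; SetFlagsFromPBFlag below decodes the same bits on the receiving
// side. Sketch:
//
//	sc := &StatementContext{}
//	sc.InInsertStmt = true
//	sc.TruncateAsWarning = true
//	flags := sc.PushDownFlags()
//	// flags == perceptron.FlagInInsertStmt | perceptron.FlagTruncateAsWarning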
// CausetTasksDetails returns some useful information of cop-tasks during execution.
func (sc *StatementContext) CausetTasksDetails() *CausetTasksDetails {
	sc.mu.Lock()
	defer sc.mu.Unlock()
	n := len(sc.mu.allInterDircDetails)
	d := &CausetTasksDetails{
		NumCausetTasks:    n,
		MaxBackoffTime:    make(map[string]time.Duration),
		AvgBackoffTime:    make(map[string]time.Duration),
		P90BackoffTime:    make(map[string]time.Duration),
		TotBackoffTime:    make(map[string]time.Duration),
		TotBackoffTimes:   make(map[string]int),
		MaxBackoffAddress: make(map[string]string),
	}
	if n == 0 {
		return d
	}
	d.AvgProcessTime = sc.mu.execDetails.ProcessTime / time.Duration(n)
	d.AvgWaitTime = sc.mu.execDetails.WaitTime / time.Duration(n)

	sort.Slice(sc.mu.allInterDircDetails, func(i, j int) bool {
		return sc.mu.allInterDircDetails[i].ProcessTime < sc.mu.allInterDircDetails[j].ProcessTime
	})
	d.P90ProcessTime = sc.mu.allInterDircDetails[n*9/10].ProcessTime
	d.MaxProcessTime = sc.mu.allInterDircDetails[n-1].ProcessTime
	d.MaxProcessAddress = sc.mu.allInterDircDetails[n-1].CalleeAddress

	sort.Slice(sc.mu.allInterDircDetails, func(i, j int) bool {
		return sc.mu.allInterDircDetails[i].WaitTime < sc.mu.allInterDircDetails[j].WaitTime
	})
	d.P90WaitTime = sc.mu.allInterDircDetails[n*9/10].WaitTime
	d.MaxWaitTime = sc.mu.allInterDircDetails[n-1].WaitTime
	d.MaxWaitAddress = sc.mu.allInterDircDetails[n-1].CalleeAddress

	// calculate backoff details
	type backoffItem struct {
		callee    string
		sleepTime time.Duration
		times     int
	}
	backoffInfo := make(map[string][]backoffItem)
	for _, ed := range sc.mu.allInterDircDetails {
		for backoff := range ed.BackoffTimes {
			backoffInfo[backoff] = append(backoffInfo[backoff], backoffItem{
				callee:    ed.CalleeAddress,
				sleepTime: ed.BackoffSleep[backoff],
				times:     ed.BackoffTimes[backoff],
			})
		}
	}
	for backoff, items := range backoffInfo {
		if len(items) == 0 {
			continue
		}
		sort.Slice(items, func(i, j int) bool {
			return items[i].sleepTime < items[j].sleepTime
		})
		n := len(items)
		d.MaxBackoffAddress[backoff] = items[n-1].callee
		d.MaxBackoffTime[backoff] = items[n-1].sleepTime
		d.P90BackoffTime[backoff] = items[n*9/10].sleepTime

		var totalTime time.Duration
		totalTimes := 0
		for _, it := range items {
			totalTime += it.sleepTime
			totalTimes += it.times
		}
		d.AvgBackoffTime[backoff] = totalTime / time.Duration(n)
		d.TotBackoffTime[backoff] = totalTime
		d.TotBackoffTimes[backoff] = totalTimes
	}
	return d
}
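// Aggregation sketch: each finished cop task merges its details first, and
// CausetTasksDetails then summarises them (the address and duration below are
// hypothetical):
//
//	sc := &StatementContext{}
//	sc.MergeInterDircDetails(&execdetails.InterDircDetails{
//	    CalleeAddress: "127.0.0.1:20160",
//	    ProcessTime:   40 * time.Millisecond,
//	}, nil)
//	d := sc.CausetTasksDetails()
//	// d.NumCausetTasks == 1 and d.MaxProcessAddress == "127.0.0.1:20160"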
// SetFlagsFromPBFlag sets the flags of StatementContext from a `fidelpb.SelectRequest.Flags`.
func (sc *StatementContext) SetFlagsFromPBFlag(flags uint64) {
	sc.IgnoreTruncate = (flags & perceptron.FlagIgnoreTruncate) > 0
	sc.TruncateAsWarning = (flags & perceptron.FlagTruncateAsWarning) > 0
	sc.InInsertStmt = (flags & perceptron.FlagInInsertStmt) > 0
	sc.InSelectStmt = (flags & perceptron.FlagInSelectStmt) > 0
	sc.OverflowAsWarning = (flags & perceptron.FlagOverflowAsWarning) > 0
	sc.IgnoreZeroInDate = (flags & perceptron.FlagIgnoreZeroInDate) > 0
	sc.DividedByZeroAsWarning = (flags & perceptron.FlagDividedByZeroAsWarning) > 0
}

// GetLockWaitStartTime returns the memex pessimistic dagger wait start time.
func (sc *StatementContext) GetLockWaitStartTime() time.Time {
	startTime := atomic.LoadInt64(&sc.lockWaitStartTime)
	if startTime == 0 {
		startTime = time.Now().UnixNano()
		atomic.StoreInt64(&sc.lockWaitStartTime, startTime)
	}
	return time.Unix(0, startTime)
}

// CausetTasksDetails collects some useful information of cop-tasks during execution.
type CausetTasksDetails struct {
	NumCausetTasks int

	AvgProcessTime    time.Duration
	P90ProcessTime    time.Duration
	MaxProcessAddress string
	MaxProcessTime    time.Duration

	AvgWaitTime    time.Duration
	P90WaitTime    time.Duration
	MaxWaitAddress string
	MaxWaitTime    time.Duration

	MaxBackoffTime    map[string]time.Duration
	MaxBackoffAddress map[string]string
	AvgBackoffTime    map[string]time.Duration
	P90BackoffTime    map[string]time.Duration
	TotBackoffTime    map[string]time.Duration
	TotBackoffTimes   map[string]int
}

// ToZapFields wraps the CausetTasksDetails as zap.Fields.
func (d *CausetTasksDetails) ToZapFields() (fields []zap.Field) {
	if d.NumCausetTasks == 0 {
		return
	}
	fields = make([]zap.Field, 0, 10)
	fields = append(fields, zap.Int("num_cop_tasks", d.NumCausetTasks))
	fields = append(fields, zap.String("process_avg_time", strconv.FormatFloat(d.AvgProcessTime.Seconds(), 'f', -1, 64)+"s"))
	fields = append(fields, zap.String("process_p90_time", strconv.FormatFloat(d.P90ProcessTime.Seconds(), 'f', -1, 64)+"s"))
	fields = append(fields, zap.String("process_max_time", strconv.FormatFloat(d.MaxProcessTime.Seconds(), 'f', -1, 64)+"s"))
	fields = append(fields, zap.String("process_max_addr", d.MaxProcessAddress))
	fields = append(fields, zap.String("wait_avg_time", strconv.FormatFloat(d.AvgWaitTime.Seconds(), 'f', -1, 64)+"s"))
	fields = append(fields, zap.String("wait_p90_time", strconv.FormatFloat(d.P90WaitTime.Seconds(), 'f', -1, 64)+"s"))
	fields = append(fields, zap.String("wait_max_time", strconv.FormatFloat(d.MaxWaitTime.Seconds(), 'f', -1, 64)+"s"))
	fields = append(fields, zap.String("wait_max_addr", d.MaxWaitAddress))
	return fields
}
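// Logging sketch: the field list produced by ToZapFields plugs straight into a
// zap logger (the logger construction here is illustrative, given a
// StatementContext sc that has already merged cop-task details):
//
//	logger, _ := zap.NewProduction()
//	if d := sc.CausetTasksDetails(); d.NumCausetTasks > 0 {
//	    logger.Info("cop tasks", d.ToZapFields()...)
//	}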