// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

// Package execdetails records execution detail information (interlock task
// timings, commit details, pessimistic-lock details) and renders it for
// logging, both as plain strings and as zap fields.
package execdetails

import (
	"bytes"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/whtcorpsinc/fidelpb/go-fidelpb"
	"go.uber.org/zap"
)

// Unexported key types make the context keys below collision-free:
// no other package can construct an identical key value.
type commitDetailCtxKeyType struct{}
type lockKeysDetailCtxKeyType struct{}

var (
	// CommitDetailCtxKey presents CommitDetail info key in context.
	CommitDetailCtxKey = commitDetailCtxKeyType{}

	// LockKeysDetailCtxKey presents LockKeysDetail info key in context.
	LockKeysDetailCtxKey = lockKeysDetailCtxKeyType{}
)

// InterDircDetails contains execution detail information.
// It aggregates per-statement interlock timings and counters, plus optional
// commit and lock-keys details when the statement wrote or locked rows.
type InterDircDetails struct {
	// CalleeAddress is the address of the serving store — presumably an
	// einsteindb instance; TODO confirm against callers.
	CalleeAddress string
	CopTime       time.Duration
	ProcessTime   time.Duration
	WaitTime      time.Duration
	BackoffTime   time.Duration
	// LockKeysDuration is the time spent acquiring pessimistic locks.
	LockKeysDuration time.Duration
	// BackoffSleep/BackoffTimes are keyed by backoff type name.
	BackoffSleep  map[string]time.Duration
	BackoffTimes  map[string]int
	RequestCount  int
	TotalKeys     int64
	ProcessedKeys int64
	// CommitDetail/LockKeysDetail are nil when the statement did not
	// commit / did not lock keys.
	CommitDetail   *CommitDetails
	LockKeysDetail *LockKeysDetails
}

type stmtInterDircDetailKeyType struct{}

// StmtInterDircDetailKey used to carry StmtInterDircDetail info in context.Context.
var StmtInterDircDetailKey = stmtInterDircDetailKeyType{}

// StmtInterDircDetails contains stmt level execution detail info.
type StmtInterDircDetails struct {
	BackoffCount                   int64
	BackoffDuration                int64
	WaitKVResFIDeluration          int64
	WaitFIDelResFIDeluration       int64
	WriteALLEGROSQLResFIDeluration time.Duration
}

// CommitDetails contains commit detail information.
type CommitDetails struct {
	GetCommitTsTime        time.Duration
	PrewriteTime           time.Duration
	WaitPrewriteBinlogTime time.Duration
	CommitTime             time.Duration
	LocalLatchTime         time.Duration
	// CommitBackoffTime is read/updated with sync/atomic elsewhere in this
	// file; keep int64 and access it atomically.
	CommitBackoffTime int64
	Mu                struct {
		sync.Mutex
		BackoffTypes []fmt.Stringer
	}
	ResolveLockTime   int64
	WriteKeys         int
	WriteSize         int
	PrewriteRegionNum int32
	TxnRetry          int
}

// Merge merges commit details into itself.
// NOTE(review): neither cd.Mu nor other.Mu is locked here — presumably the
// caller merges single-threaded; confirm before calling concurrently.
func (cd *CommitDetails) Merge(other *CommitDetails) {
	cd.GetCommitTsTime += other.GetCommitTsTime
	cd.PrewriteTime += other.PrewriteTime
	cd.WaitPrewriteBinlogTime += other.WaitPrewriteBinlogTime
	cd.CommitTime += other.CommitTime
	cd.LocalLatchTime += other.LocalLatchTime
	cd.CommitBackoffTime += other.CommitBackoffTime
	cd.ResolveLockTime += other.ResolveLockTime
	cd.WriteKeys += other.WriteKeys
	cd.WriteSize += other.WriteSize
	cd.PrewriteRegionNum += other.PrewriteRegionNum
	cd.TxnRetry += other.TxnRetry
	cd.Mu.BackoffTypes = append(cd.Mu.BackoffTypes, other.Mu.BackoffTypes...)
}

// Clone returns a deep copy of itself.
func (cd *CommitDetails) Clone() *CommitDetails {
	commit := &CommitDetails{
		GetCommitTsTime:        cd.GetCommitTsTime,
		PrewriteTime:           cd.PrewriteTime,
		WaitPrewriteBinlogTime: cd.WaitPrewriteBinlogTime,
		CommitTime:             cd.CommitTime,
		LocalLatchTime:         cd.LocalLatchTime,
		CommitBackoffTime:      cd.CommitBackoffTime,
		ResolveLockTime:        cd.ResolveLockTime,
		WriteKeys:              cd.WriteKeys,
		WriteSize:              cd.WriteSize,
		PrewriteRegionNum:      cd.PrewriteRegionNum,
		TxnRetry:               cd.TxnRetry,
	}
	// Copy the slice so the clone does not share the backing array.
	commit.Mu.BackoffTypes = append([]fmt.Stringer{}, cd.Mu.BackoffTypes...)
	return commit
}

// LockKeysDetails contains pessimistic dagger keys detail information.
type LockKeysDetails struct {
	TotalTime       time.Duration
	RegionNum       int32
	LockKeys        int32
	ResolveLockTime int64
	BackoffTime     int64
	Mu              struct {
		sync.Mutex
		BackoffTypes []fmt.Stringer
	}
	LockRPCTime  int64
	LockRPCCount int64
	// RetryCount counts how many times Merge has been called (each merged
	// detail represents one retry).
	RetryCount int
}

// Merge merges dagger keys execution details into self.
func (ld *LockKeysDetails) Merge(lockKey *LockKeysDetails) {
	ld.TotalTime += lockKey.TotalTime
	ld.RegionNum += lockKey.RegionNum
	ld.LockKeys += lockKey.LockKeys
	ld.ResolveLockTime += lockKey.ResolveLockTime
	ld.BackoffTime += lockKey.BackoffTime
	ld.LockRPCTime += lockKey.LockRPCTime
	// BUG FIX: was `ld.LockRPCCount += ld.LockRPCCount`, which doubled the
	// receiver's own count and ignored the merged detail entirely.
	ld.LockRPCCount += lockKey.LockRPCCount
	ld.Mu.BackoffTypes = append(ld.Mu.BackoffTypes, lockKey.Mu.BackoffTypes...)
	ld.RetryCount++
}

// Clone returns a deep copy of itself.
func (ld *LockKeysDetails) Clone() *LockKeysDetails {
	dagger := &LockKeysDetails{
		TotalTime:       ld.TotalTime,
		RegionNum:       ld.RegionNum,
		LockKeys:        ld.LockKeys,
		ResolveLockTime: ld.ResolveLockTime,
		BackoffTime:     ld.BackoffTime,
		LockRPCTime:     ld.LockRPCTime,
		LockRPCCount:    ld.LockRPCCount,
		RetryCount:      ld.RetryCount,
	}
	// Copy the slice so the clone does not share the backing array.
	dagger.Mu.BackoffTypes = append([]fmt.Stringer{}, ld.Mu.BackoffTypes...)
	return dagger
}

const (
	// CopTimeStr represents the sum of cop-task time spend in MilevaDB distALLEGROSQL.
	CopTimeStr = "Cop_time"
	// ProcessTimeStr represents the sum of process time of all the interlock tasks.
	ProcessTimeStr = "Process_time"
	// WaitTimeStr means the time of all interlock wait.
	WaitTimeStr = "Wait_time"
	// BackoffTimeStr means the time of all back-off.
	BackoffTimeStr = "Backoff_time"
	// LockKeysTimeStr means the time interval between pessimistic dagger wait start and dagger got obtain
	LockKeysTimeStr = "LockKeys_time"
	// RequestCountStr means the request count.
	RequestCountStr = "Request_count"
	// TotalKeysStr means the total scan keys.
	TotalKeysStr = "Total_keys"
	// ProcessKeysStr means the total processed keys.
	ProcessKeysStr = "Process_keys"
	// PreWriteTimeStr means the time of pre-write.
	PreWriteTimeStr = "Prewrite_time"
	// WaitPrewriteBinlogTimeStr means the time of waiting prewrite binlog finished when transaction committing.
	WaitPrewriteBinlogTimeStr = "Wait_prewrite_binlog_time"
	// CommitTimeStr means the time of commit.
	CommitTimeStr = "Commit_time"
	// GetCommitTSTimeStr means the time of getting commit ts.
	GetCommitTSTimeStr = "Get_commit_ts_time"
	// CommitBackoffTimeStr means the time of commit backoff.
	CommitBackoffTimeStr = "Commit_backoff_time"
	// BackoffTypesStr means the backoff type.
	BackoffTypesStr = "Backoff_types"
	// ResolveLockTimeStr means the time of resolving dagger.
	ResolveLockTimeStr = "Resolve_lock_time"
	// LocalLatchWaitTimeStr means the time of waiting in local latch.
	LocalLatchWaitTimeStr = "Local_latch_wait_time"
	// WriteKeysStr means the count of keys in the transaction.
	WriteKeysStr = "Write_keys"
	// WriteSizeStr means the key/value size in the transaction.
	WriteSizeStr = "Write_size"
	// PrewriteRegionStr means the count of region when pre-write.
	PrewriteRegionStr = "Prewrite_region"
	// TxnRetryStr means the count of transaction retry.
	TxnRetryStr = "Txn_retry"
)

// String implements the fmt.Stringer interface.
// String implements the fmt.Stringer interface.
// It renders only the fields that are set (> 0 / non-nil), as space-separated
// "Name: value" pairs; durations are printed in seconds. The output format
// matches the slow-log field names defined by the *Str constants above.
func (d InterDircDetails) String() string {
	parts := make([]string, 0, 8)
	if d.CopTime > 0 {
		parts = append(parts, CopTimeStr+": "+strconv.FormatFloat(d.CopTime.Seconds(), 'f', -1, 64))
	}
	if d.ProcessTime > 0 {
		parts = append(parts, ProcessTimeStr+": "+strconv.FormatFloat(d.ProcessTime.Seconds(), 'f', -1, 64))
	}
	if d.WaitTime > 0 {
		parts = append(parts, WaitTimeStr+": "+strconv.FormatFloat(d.WaitTime.Seconds(), 'f', -1, 64))
	}
	if d.BackoffTime > 0 {
		parts = append(parts, BackoffTimeStr+": "+strconv.FormatFloat(d.BackoffTime.Seconds(), 'f', -1, 64))
	}
	if d.LockKeysDuration > 0 {
		parts = append(parts, LockKeysTimeStr+": "+strconv.FormatFloat(d.LockKeysDuration.Seconds(), 'f', -1, 64))
	}
	if d.RequestCount > 0 {
		parts = append(parts, RequestCountStr+": "+strconv.FormatInt(int64(d.RequestCount), 10))
	}
	if d.TotalKeys > 0 {
		parts = append(parts, TotalKeysStr+": "+strconv.FormatInt(d.TotalKeys, 10))
	}
	if d.ProcessedKeys > 0 {
		parts = append(parts, ProcessKeysStr+": "+strconv.FormatInt(d.ProcessedKeys, 10))
	}
	commitDetails := d.CommitDetail
	if commitDetails != nil {
		if commitDetails.PrewriteTime > 0 {
			parts = append(parts, PreWriteTimeStr+": "+strconv.FormatFloat(commitDetails.PrewriteTime.Seconds(), 'f', -1, 64))
		}
		if commitDetails.WaitPrewriteBinlogTime > 0 {
			parts = append(parts, WaitPrewriteBinlogTimeStr+": "+strconv.FormatFloat(commitDetails.WaitPrewriteBinlogTime.Seconds(), 'f', -1, 64))
		}
		if commitDetails.CommitTime > 0 {
			parts = append(parts, CommitTimeStr+": "+strconv.FormatFloat(commitDetails.CommitTime.Seconds(), 'f', -1, 64))
		}
		if commitDetails.GetCommitTsTime > 0 {
			parts = append(parts, GetCommitTSTimeStr+": "+strconv.FormatFloat(commitDetails.GetCommitTsTime.Seconds(), 'f', -1, 64))
		}
		// CommitBackoffTime and ResolveLockTime are updated concurrently
		// elsewhere, hence the atomic loads here.
		commitBackoffTime := atomic.LoadInt64(&commitDetails.CommitBackoffTime)
		if commitBackoffTime > 0 {
			parts = append(parts, CommitBackoffTimeStr+": "+strconv.FormatFloat(time.Duration(commitBackoffTime).Seconds(), 'f', -1, 64))
		}
		// BackoffTypes is guarded by Mu; hold the lock only around the read.
		commitDetails.Mu.Lock()
		if len(commitDetails.Mu.BackoffTypes) > 0 {
			parts = append(parts, BackoffTypesStr+": "+fmt.Sprintf("%v", commitDetails.Mu.BackoffTypes))
		}
		commitDetails.Mu.Unlock()
		resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime)
		if resolveLockTime > 0 {
			parts = append(parts, ResolveLockTimeStr+": "+strconv.FormatFloat(time.Duration(resolveLockTime).Seconds(), 'f', -1, 64))
		}
		if commitDetails.LocalLatchTime > 0 {
			parts = append(parts, LocalLatchWaitTimeStr+": "+strconv.FormatFloat(commitDetails.LocalLatchTime.Seconds(), 'f', -1, 64))
		}
		if commitDetails.WriteKeys > 0 {
			parts = append(parts, WriteKeysStr+": "+strconv.FormatInt(int64(commitDetails.WriteKeys), 10))
		}
		if commitDetails.WriteSize > 0 {
			parts = append(parts, WriteSizeStr+": "+strconv.FormatInt(int64(commitDetails.WriteSize), 10))
		}
		prewriteRegionNum := atomic.LoadInt32(&commitDetails.PrewriteRegionNum)
		if prewriteRegionNum > 0 {
			parts = append(parts, PrewriteRegionStr+": "+strconv.FormatInt(int64(prewriteRegionNum), 10))
		}
		if commitDetails.TxnRetry > 0 {
			parts = append(parts, TxnRetryStr+": "+strconv.FormatInt(int64(commitDetails.TxnRetry), 10))
		}
	}
	return strings.Join(parts, " ")
}

// ToZapFields wraps the InterDircDetails as zap.Fields.
289 func (d InterDircDetails) ToZapFields() (fields []zap.Field) { 290 fields = make([]zap.Field, 0, 16) 291 if d.CopTime > 0 { 292 fields = append(fields, zap.String(strings.ToLower(CopTimeStr), strconv.FormatFloat(d.CopTime.Seconds(), 'f', -1, 64)+"s")) 293 } 294 if d.ProcessTime > 0 { 295 fields = append(fields, zap.String(strings.ToLower(ProcessTimeStr), strconv.FormatFloat(d.ProcessTime.Seconds(), 'f', -1, 64)+"s")) 296 } 297 if d.WaitTime > 0 { 298 fields = append(fields, zap.String(strings.ToLower(WaitTimeStr), strconv.FormatFloat(d.WaitTime.Seconds(), 'f', -1, 64)+"s")) 299 } 300 if d.BackoffTime > 0 { 301 fields = append(fields, zap.String(strings.ToLower(BackoffTimeStr), strconv.FormatFloat(d.BackoffTime.Seconds(), 'f', -1, 64)+"s")) 302 } 303 if d.RequestCount > 0 { 304 fields = append(fields, zap.String(strings.ToLower(RequestCountStr), strconv.FormatInt(int64(d.RequestCount), 10))) 305 } 306 if d.TotalKeys > 0 { 307 fields = append(fields, zap.String(strings.ToLower(TotalKeysStr), strconv.FormatInt(d.TotalKeys, 10))) 308 } 309 if d.ProcessedKeys > 0 { 310 fields = append(fields, zap.String(strings.ToLower(ProcessKeysStr), strconv.FormatInt(d.ProcessedKeys, 10))) 311 } 312 commitDetails := d.CommitDetail 313 if commitDetails != nil { 314 if commitDetails.PrewriteTime > 0 { 315 fields = append(fields, zap.String("prewrite_time", fmt.Sprintf("%v", strconv.FormatFloat(commitDetails.PrewriteTime.Seconds(), 'f', -1, 64)+"s"))) 316 } 317 if commitDetails.CommitTime > 0 { 318 fields = append(fields, zap.String("commit_time", fmt.Sprintf("%v", strconv.FormatFloat(commitDetails.CommitTime.Seconds(), 'f', -1, 64)+"s"))) 319 } 320 if commitDetails.GetCommitTsTime > 0 { 321 fields = append(fields, zap.String("get_commit_ts_time", fmt.Sprintf("%v", strconv.FormatFloat(commitDetails.GetCommitTsTime.Seconds(), 'f', -1, 64)+"s"))) 322 } 323 commitBackoffTime := atomic.LoadInt64(&commitDetails.CommitBackoffTime) 324 if commitBackoffTime > 0 { 325 fields = append(fields, 
zap.String("commit_backoff_time", fmt.Sprintf("%v", strconv.FormatFloat(time.Duration(commitBackoffTime).Seconds(), 'f', -1, 64)+"s"))) 326 } 327 commitDetails.Mu.Lock() 328 if len(commitDetails.Mu.BackoffTypes) > 0 { 329 fields = append(fields, zap.String("backoff_types", fmt.Sprintf("%v", commitDetails.Mu.BackoffTypes))) 330 } 331 commitDetails.Mu.Unlock() 332 resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) 333 if resolveLockTime > 0 { 334 fields = append(fields, zap.String("resolve_lock_time", fmt.Sprintf("%v", strconv.FormatFloat(time.Duration(resolveLockTime).Seconds(), 'f', -1, 64)+"s"))) 335 } 336 if commitDetails.LocalLatchTime > 0 { 337 fields = append(fields, zap.String("local_latch_wait_time", fmt.Sprintf("%v", strconv.FormatFloat(commitDetails.LocalLatchTime.Seconds(), 'f', -1, 64)+"s"))) 338 } 339 if commitDetails.WriteKeys > 0 { 340 fields = append(fields, zap.Int("write_keys", commitDetails.WriteKeys)) 341 } 342 if commitDetails.WriteSize > 0 { 343 fields = append(fields, zap.Int("write_size", commitDetails.WriteSize)) 344 } 345 prewriteRegionNum := atomic.LoadInt32(&commitDetails.PrewriteRegionNum) 346 if prewriteRegionNum > 0 { 347 fields = append(fields, zap.Int32("prewrite_region", prewriteRegionNum)) 348 } 349 if commitDetails.TxnRetry > 0 { 350 fields = append(fields, zap.Int("txn_retry", commitDetails.TxnRetry)) 351 } 352 } 353 return fields 354 } 355 356 // CopRuntimeStats defCauslects cop tasks' execution info. 357 type CopRuntimeStats struct { 358 sync.Mutex 359 360 // stats stores the runtime statistics of interlock tasks. 361 // The key of the map is the einsteindb-server address. Because a einsteindb-server can 362 // have many region leaders, several interlock tasks can be sent to the 363 // same einsteindb-server instance. We have to use a list to maintain all tasks 364 // executed on each instance. 
365 stats map[string][]*BasicRuntimeStats 366 } 367 368 // RecordOneCopTask records a specific cop tasks's execution detail. 369 func (crs *CopRuntimeStats) RecordOneCopTask(address string, summary *fidelpb.InterlockingDirectorateInterDircutionSummary) { 370 crs.Lock() 371 defer crs.Unlock() 372 crs.stats[address] = append(crs.stats[address], 373 &BasicRuntimeStats{loop: int32(*summary.NumIterations), 374 consume: int64(*summary.TimeProcessedNs), 375 rows: int64(*summary.NumProducedRows)}) 376 } 377 378 // GetActRows return total rows of CopRuntimeStats. 379 func (crs *CopRuntimeStats) GetActRows() (totalRows int64) { 380 for _, instanceStats := range crs.stats { 381 for _, stat := range instanceStats { 382 totalRows += stat.rows 383 } 384 } 385 return totalRows 386 } 387 388 func (crs *CopRuntimeStats) String() string { 389 if len(crs.stats) == 0 { 390 return "" 391 } 392 393 var totalTasks int64 394 var totalIters int32 395 procTimes := make([]time.Duration, 0, 32) 396 for _, instanceStats := range crs.stats { 397 for _, stat := range instanceStats { 398 procTimes = append(procTimes, time.Duration(stat.consume)*time.Nanosecond) 399 totalIters += stat.loop 400 totalTasks++ 401 } 402 } 403 404 if totalTasks == 1 { 405 return fmt.Sprintf("time:%v, loops:%d", procTimes[0], totalIters) 406 } 407 408 n := len(procTimes) 409 sort.Slice(procTimes, func(i, j int) bool { return procTimes[i] < procTimes[j] }) 410 return fmt.Sprintf("proc max:%v, min:%v, p80:%v, p95:%v, iters:%v, tasks:%v", 411 procTimes[n-1], procTimes[0], procTimes[n*4/5], procTimes[n*19/20], totalIters, totalTasks) 412 } 413 414 const ( 415 // TpBasicRuntimeStats is the tp for BasicRuntimeStats. 416 TpBasicRuntimeStats int = iota 417 // TpRuntimeStatsWithCommit is the tp for RuntimeStatsWithCommit. 418 TpRuntimeStatsWithCommit 419 // TpRuntimeStatsWithConcurrencyInfo is the tp for RuntimeStatsWithConcurrencyInfo. 
420 TpRuntimeStatsWithConcurrencyInfo 421 // TpSnapshotRuntimeStats is the tp for SnapshotRuntimeStats. 422 TpSnapshotRuntimeStats 423 // TpHashJoinRuntimeStats is the tp for HashJoinRuntimeStats. 424 TpHashJoinRuntimeStats 425 // TpIndexLookUpJoinRuntimeStats is the tp for IndexLookUpJoinRuntimeStats. 426 TpIndexLookUpJoinRuntimeStats 427 // TpRuntimeStatsWithSnapshot is the tp for RuntimeStatsWithSnapshot. 428 TpRuntimeStatsWithSnapshot 429 // TpJoinRuntimeStats is the tp for JoinRuntimeStats. 430 TpJoinRuntimeStats 431 // TpSelectResultRuntimeStats is the tp for SelectResultRuntimeStats. 432 TpSelectResultRuntimeStats 433 ) 434 435 // RuntimeStats is used to express the interlock runtime information. 436 type RuntimeStats interface { 437 String() string 438 Merge(RuntimeStats) 439 Clone() RuntimeStats 440 Tp() int 441 } 442 443 // BasicRuntimeStats is the basic runtime stats. 444 type BasicRuntimeStats struct { 445 // interlock's Next() called times. 446 loop int32 447 // interlock consume time. 448 consume int64 449 // interlock return event count. 450 rows int64 451 } 452 453 // GetActRows return total rows of BasicRuntimeStats. 454 func (e *BasicRuntimeStats) GetActRows() int64 { 455 return e.rows 456 } 457 458 // Clone implements the RuntimeStats interface. 459 func (e *BasicRuntimeStats) Clone() RuntimeStats { 460 return &BasicRuntimeStats{ 461 loop: e.loop, 462 consume: e.consume, 463 rows: e.rows, 464 } 465 } 466 467 // Merge implements the RuntimeStats interface. 468 func (e *BasicRuntimeStats) Merge(rs RuntimeStats) { 469 tmp, ok := rs.(*BasicRuntimeStats) 470 if !ok { 471 return 472 } 473 e.loop += tmp.loop 474 e.consume += tmp.consume 475 e.rows += tmp.rows 476 } 477 478 // Tp implements the RuntimeStats interface. 479 func (e *BasicRuntimeStats) Tp() int { 480 return TpBasicRuntimeStats 481 } 482 483 // RootRuntimeStats is the interlock runtime stats that combine with multiple runtime stats. 
484 type RootRuntimeStats struct { 485 basics []*BasicRuntimeStats 486 groupRss [][]RuntimeStats 487 } 488 489 // GetActRows return total rows of RootRuntimeStats. 490 func (e *RootRuntimeStats) GetActRows() int64 { 491 num := int64(0) 492 for _, basic := range e.basics { 493 num += basic.GetActRows() 494 } 495 return num 496 } 497 498 // String implements the RuntimeStats interface. 499 func (e *RootRuntimeStats) String() string { 500 buf := bytes.NewBuffer(make([]byte, 0, 32)) 501 if len(e.basics) > 0 { 502 if len(e.basics) == 1 { 503 buf.WriteString(e.basics[0].String()) 504 } else { 505 basic := e.basics[0].Clone() 506 for i := 1; i < len(e.basics); i++ { 507 basic.Merge(e.basics[i]) 508 } 509 buf.WriteString(basic.String()) 510 } 511 } 512 if len(e.groupRss) > 0 { 513 if buf.Len() > 0 { 514 buf.WriteString(", ") 515 } 516 for i, rss := range e.groupRss { 517 if i > 0 { 518 buf.WriteString(", ") 519 } 520 if len(rss) == 1 { 521 buf.WriteString(rss[0].String()) 522 continue 523 } 524 rs := rss[0].Clone() 525 for i := 1; i < len(rss); i++ { 526 rs.Merge(rss[i]) 527 } 528 buf.WriteString(rs.String()) 529 } 530 } 531 return buf.String() 532 } 533 534 // Record records interlock's execution. 535 func (e *BasicRuntimeStats) Record(d time.Duration, rowNum int) { 536 atomic.AddInt32(&e.loop, 1) 537 atomic.AddInt64(&e.consume, int64(d)) 538 atomic.AddInt64(&e.rows, int64(rowNum)) 539 } 540 541 // SetRowNum sets the event num. 542 func (e *BasicRuntimeStats) SetRowNum(rowNum int64) { 543 atomic.StoreInt64(&e.rows, rowNum) 544 } 545 546 // String implements the RuntimeStats interface. 547 func (e *BasicRuntimeStats) String() string { 548 return fmt.Sprintf("time:%v, loops:%d", time.Duration(e.consume), e.loop) 549 } 550 551 // RuntimeStatsDefCausl defCauslects interlocks's execution info. 
552 type RuntimeStatsDefCausl struct { 553 mu sync.Mutex 554 rootStats map[int]*RootRuntimeStats 555 copStats map[int]*CopRuntimeStats 556 } 557 558 // NewRuntimeStatsDefCausl creates new interlock defCauslector. 559 func NewRuntimeStatsDefCausl() *RuntimeStatsDefCausl { 560 return &RuntimeStatsDefCausl{rootStats: make(map[int]*RootRuntimeStats), 561 copStats: make(map[int]*CopRuntimeStats)} 562 } 563 564 // RegisterStats register execStat for a interlock. 565 func (e *RuntimeStatsDefCausl) RegisterStats(planID int, info RuntimeStats) { 566 e.mu.Lock() 567 stats, ok := e.rootStats[planID] 568 if !ok { 569 stats = &RootRuntimeStats{} 570 e.rootStats[planID] = stats 571 } 572 if basic, ok := info.(*BasicRuntimeStats); ok { 573 stats.basics = append(stats.basics, basic) 574 } else { 575 tp := info.Tp() 576 found := false 577 for i, rss := range stats.groupRss { 578 if len(rss) == 0 { 579 continue 580 } 581 if rss[0].Tp() == tp { 582 stats.groupRss[i] = append(stats.groupRss[i], info) 583 found = true 584 break 585 } 586 } 587 if !found { 588 stats.groupRss = append(stats.groupRss, []RuntimeStats{info}) 589 } 590 } 591 e.mu.Unlock() 592 } 593 594 // GetRootStats gets execStat for a interlock. 595 func (e *RuntimeStatsDefCausl) GetRootStats(planID int) *RootRuntimeStats { 596 e.mu.Lock() 597 defer e.mu.Unlock() 598 runtimeStats, exists := e.rootStats[planID] 599 if !exists { 600 runtimeStats = &RootRuntimeStats{} 601 e.rootStats[planID] = runtimeStats 602 } 603 return runtimeStats 604 } 605 606 // GetCopStats gets the CopRuntimeStats specified by planID. 
607 func (e *RuntimeStatsDefCausl) GetCopStats(planID int) *CopRuntimeStats { 608 e.mu.Lock() 609 defer e.mu.Unlock() 610 copStats, ok := e.copStats[planID] 611 if !ok { 612 copStats = &CopRuntimeStats{stats: make(map[string][]*BasicRuntimeStats)} 613 e.copStats[planID] = copStats 614 } 615 return copStats 616 } 617 618 func getCausetIDFromInterDircutionSummary(summary *fidelpb.InterlockingDirectorateInterDircutionSummary) (int, bool) { 619 if summary.GetInterlockingDirectorateId() != "" { 620 strs := strings.Split(summary.GetInterlockingDirectorateId(), "_") 621 if id, err := strconv.Atoi(strs[len(strs)-1]); err == nil { 622 return id, true 623 } 624 } 625 return 0, false 626 } 627 628 // RecordOneCopTask records a specific cop tasks's execution detail. 629 func (e *RuntimeStatsDefCausl) RecordOneCopTask(planID int, address string, summary *fidelpb.InterlockingDirectorateInterDircutionSummary) { 630 // for TiFlash cop response, InterlockingDirectorateInterDircutionSummary contains interlock id, so if there is a valid interlock id in 631 // summary, use it overwrite the planID 632 if id, valid := getCausetIDFromInterDircutionSummary(summary); valid { 633 planID = id 634 } 635 copStats := e.GetCopStats(planID) 636 copStats.RecordOneCopTask(address, summary) 637 } 638 639 // ExistsRootStats checks if the planID exists in the rootStats defCauslection. 640 func (e *RuntimeStatsDefCausl) ExistsRootStats(planID int) bool { 641 e.mu.Lock() 642 defer e.mu.Unlock() 643 _, exists := e.rootStats[planID] 644 return exists 645 } 646 647 // ExistsCopStats checks if the planID exists in the copStats defCauslection. 
648 func (e *RuntimeStatsDefCausl) ExistsCopStats(planID int) bool { 649 e.mu.Lock() 650 defer e.mu.Unlock() 651 _, exists := e.copStats[planID] 652 return exists 653 } 654 655 // ConcurrencyInfo is used to save the concurrency information of the interlock operator 656 type ConcurrencyInfo struct { 657 concurrencyName string 658 concurrencyNum int 659 } 660 661 // NewConcurrencyInfo creates new interlock's concurrencyInfo. 662 func NewConcurrencyInfo(name string, num int) *ConcurrencyInfo { 663 return &ConcurrencyInfo{name, num} 664 } 665 666 // RuntimeStatsWithConcurrencyInfo is the BasicRuntimeStats with ConcurrencyInfo. 667 type RuntimeStatsWithConcurrencyInfo struct { 668 // protect concurrency 669 sync.Mutex 670 // interlock concurrency information 671 concurrency []*ConcurrencyInfo 672 } 673 674 // Tp implements the RuntimeStats interface. 675 func (e *RuntimeStatsWithConcurrencyInfo) Tp() int { 676 return TpRuntimeStatsWithConcurrencyInfo 677 } 678 679 // SetConcurrencyInfo sets the concurrency informations. 680 // We must clear the concurrencyInfo first when we call the SetConcurrencyInfo. 681 // When the num <= 0, it means the exector operator is not executed parallel. 682 func (e *RuntimeStatsWithConcurrencyInfo) SetConcurrencyInfo(infos ...*ConcurrencyInfo) { 683 e.Lock() 684 defer e.Unlock() 685 e.concurrency = e.concurrency[:0] 686 for _, info := range infos { 687 e.concurrency = append(e.concurrency, info) 688 } 689 } 690 691 // Clone implements the RuntimeStats interface. 692 func (e *RuntimeStatsWithConcurrencyInfo) Clone() RuntimeStats { 693 newRs := &RuntimeStatsWithConcurrencyInfo{ 694 concurrency: make([]*ConcurrencyInfo, 0, len(e.concurrency)), 695 } 696 newRs.concurrency = append(newRs.concurrency, e.concurrency...) 697 return newRs 698 } 699 700 // String implements the RuntimeStats interface. 
701 func (e *RuntimeStatsWithConcurrencyInfo) String() string { 702 var result string 703 if len(e.concurrency) > 0 { 704 for i, concurrency := range e.concurrency { 705 if i > 0 { 706 result += ", " 707 } 708 if concurrency.concurrencyNum > 0 { 709 result += fmt.Sprintf("%s:%d", concurrency.concurrencyName, concurrency.concurrencyNum) 710 } else { 711 result += fmt.Sprintf("%s:OFF", concurrency.concurrencyName) 712 } 713 } 714 } 715 return result 716 } 717 718 // Merge implements the RuntimeStats interface. 719 func (e *RuntimeStatsWithConcurrencyInfo) Merge(rs RuntimeStats) { 720 tmp, ok := rs.(*RuntimeStatsWithConcurrencyInfo) 721 if !ok { 722 return 723 } 724 e.concurrency = append(e.concurrency, tmp.concurrency...) 725 } 726 727 // RuntimeStatsWithCommit is the RuntimeStats with commit detail. 728 type RuntimeStatsWithCommit struct { 729 Commit *CommitDetails 730 LockKeys *LockKeysDetails 731 } 732 733 // Tp implements the RuntimeStats interface. 734 func (e *RuntimeStatsWithCommit) Tp() int { 735 return TpRuntimeStatsWithCommit 736 } 737 738 // Merge implements the RuntimeStats interface. 739 func (e *RuntimeStatsWithCommit) Merge(rs RuntimeStats) { 740 tmp, ok := rs.(*RuntimeStatsWithCommit) 741 if !ok { 742 return 743 } 744 if tmp.Commit != nil { 745 if e.Commit == nil { 746 e.Commit = &CommitDetails{} 747 } 748 e.Commit.Merge(tmp.Commit) 749 } 750 751 if tmp.LockKeys != nil { 752 if e.LockKeys == nil { 753 e.LockKeys = &LockKeysDetails{} 754 } 755 e.LockKeys.Merge(tmp.LockKeys) 756 } 757 } 758 759 // Clone implements the RuntimeStats interface. 760 func (e *RuntimeStatsWithCommit) Clone() RuntimeStats { 761 newRs := RuntimeStatsWithCommit{} 762 if e.Commit != nil { 763 newRs.Commit = e.Commit.Clone() 764 } 765 if e.LockKeys != nil { 766 newRs.LockKeys = e.LockKeys.Clone() 767 } 768 return &newRs 769 } 770 771 // String implements the RuntimeStats interface. 
// String implements the RuntimeStats interface.
// It renders two optional sections, "commit_txn: {...}" and "lock_keys: {...}",
// each listing only the fields that are set. Atomic loads are used for the
// counters that are updated concurrently elsewhere; BackoffTypes is read
// under its mutex.
func (e *RuntimeStatsWithCommit) String() string {
	buf := bytes.NewBuffer(make([]byte, 0, 32))
	if e.Commit != nil {
		buf.WriteString("commit_txn: {")
		if e.Commit.PrewriteTime > 0 {
			buf.WriteString("prewrite:")
			buf.WriteString(e.Commit.PrewriteTime.String())
		}
		if e.Commit.WaitPrewriteBinlogTime > 0 {
			buf.WriteString(", wait_prewrite_binlog:")
			buf.WriteString(e.Commit.WaitPrewriteBinlogTime.String())
		}
		if e.Commit.GetCommitTsTime > 0 {
			buf.WriteString(", get_commit_ts:")
			buf.WriteString(e.Commit.GetCommitTsTime.String())
		}
		if e.Commit.CommitTime > 0 {
			buf.WriteString(", commit:")
			buf.WriteString(e.Commit.CommitTime.String())
		}
		commitBackoffTime := atomic.LoadInt64(&e.Commit.CommitBackoffTime)
		if commitBackoffTime > 0 {
			buf.WriteString(", backoff: {time: ")
			buf.WriteString(time.Duration(commitBackoffTime).String())
			e.Commit.Mu.Lock()
			if len(e.Commit.Mu.BackoffTypes) > 0 {
				buf.WriteString(", type: ")
				buf.WriteString(e.formatBackoff(e.Commit.Mu.BackoffTypes))
			}
			e.Commit.Mu.Unlock()
			buf.WriteString("}")
		}
		// NOTE(review): ResolveLockTime is read directly here although other
		// readers in this file use atomic.LoadInt64 — confirm intent.
		if e.Commit.ResolveLockTime > 0 {
			buf.WriteString(", resolve_lock: ")
			buf.WriteString(time.Duration(e.Commit.ResolveLockTime).String())
		}

		prewriteRegionNum := atomic.LoadInt32(&e.Commit.PrewriteRegionNum)
		if prewriteRegionNum > 0 {
			buf.WriteString(", region_num:")
			buf.WriteString(strconv.FormatInt(int64(prewriteRegionNum), 10))
		}
		if e.Commit.WriteKeys > 0 {
			buf.WriteString(", write_keys:")
			buf.WriteString(strconv.FormatInt(int64(e.Commit.WriteKeys), 10))
		}
		if e.Commit.WriteSize > 0 {
			buf.WriteString(", write_byte:")
			buf.WriteString(strconv.FormatInt(int64(e.Commit.WriteSize), 10))
		}
		if e.Commit.TxnRetry > 0 {
			buf.WriteString(", txn_retry:")
			buf.WriteString(strconv.FormatInt(int64(e.Commit.TxnRetry), 10))
		}
		buf.WriteString("}")
	}
	if e.LockKeys != nil {
		if buf.Len() > 0 {
			buf.WriteString(", ")
		}
		buf.WriteString("lock_keys: {")
		if e.LockKeys.TotalTime > 0 {
			buf.WriteString("time:")
			buf.WriteString(e.LockKeys.TotalTime.String())
		}
		if e.LockKeys.RegionNum > 0 {
			buf.WriteString(", region:")
			buf.WriteString(strconv.FormatInt(int64(e.LockKeys.RegionNum), 10))
		}
		if e.LockKeys.LockKeys > 0 {
			buf.WriteString(", keys:")
			buf.WriteString(strconv.FormatInt(int64(e.LockKeys.LockKeys), 10))
		}
		if e.LockKeys.ResolveLockTime > 0 {
			buf.WriteString(", resolve_lock:")
			buf.WriteString(time.Duration(e.LockKeys.ResolveLockTime).String())
		}
		if e.LockKeys.BackoffTime > 0 {
			buf.WriteString(", backoff: {time: ")
			buf.WriteString(time.Duration(e.LockKeys.BackoffTime).String())
			e.LockKeys.Mu.Lock()
			if len(e.LockKeys.Mu.BackoffTypes) > 0 {
				buf.WriteString(", type: ")
				buf.WriteString(e.formatBackoff(e.LockKeys.Mu.BackoffTypes))
			}
			e.LockKeys.Mu.Unlock()
			buf.WriteString("}")
		}
		if e.LockKeys.LockRPCTime > 0 {
			buf.WriteString(", lock_rpc:")
			buf.WriteString(time.Duration(e.LockKeys.LockRPCTime).String())
		}
		if e.LockKeys.LockRPCCount > 0 {
			buf.WriteString(", rpc_count:")
			buf.WriteString(strconv.FormatInt(e.LockKeys.LockRPCCount, 10))
		}
		if e.LockKeys.RetryCount > 0 {
			buf.WriteString(", retry_count:")
			buf.WriteString(strconv.FormatInt(int64(e.LockKeys.RetryCount), 10))
		}
		buf.WriteString("}")
	}
	return buf.String()
}

// formatBackoff deduplicates the backoff type names, sorts them, and renders
// the result with %v (e.g. "[typeA typeB]"). Returns "" for an empty slice.
func (e *RuntimeStatsWithCommit) formatBackoff(backoffTypes []fmt.Stringer) string {
	if len(backoffTypes) == 0 {
		return ""
	}
	tpMap := make(map[string]struct{})
	tpArray := []string{}
	for _, tp := range backoffTypes {
		tpStr := tp.String()
		_, ok := tpMap[tpStr]
		if ok {
			continue
		}
		tpMap[tpStr] = struct{}{}
		tpArray = append(tpArray, tpStr)
	}
	sort.Strings(tpArray)
	return fmt.Sprintf("%v", tpArray)
}