github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/interlock/slow_query.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package interlock

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
	"github.com/whtcorpsinc/BerolinaSQL/auth"
	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/failpoint"
	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
	"github.com/whtcorpsinc/milevadb/privilege"
	"github.com/whtcorpsinc/milevadb/schemareplicant"
	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	"github.com/whtcorpsinc/milevadb/soliton/plancodec"
	"github.com/whtcorpsinc/milevadb/soliton/replog"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
	"github.com/whtcorpsinc/milevadb/stochastikctx/variable"
	"github.com/whtcorpsinc/milevadb/types"
	"go.uber.org/zap"
)

// slowQueryRetriever is used to read slow log data.
type slowQueryRetriever struct {
	causet         *perceptron.BlockInfo
	outputDefCauss []*perceptron.DeferredCausetInfo
	initialized    bool
	extractor      *causetembedded.SlowQueryExtractor
	files          []logFile
	fileIdx        int
	fileLine       int
	checker        *slowLogChecker

	parsedSlowLogCh chan parsedSlowLog
}

// retrieve returns the next batch of slow log rows. On the first call it
// initializes the checker and the file list and starts the asynchronous
// parser. A (nil, nil) return means all slow log data has been retrieved.
func (e *slowQueryRetriever) retrieve(ctx context.Context, sctx stochastikctx.Context) ([][]types.Causet, error) {
	if !e.initialized {
		err := e.initialize(sctx)
		if err != nil {
			return nil, err
		}
		e.initializeAsyncParsing(ctx, sctx)
	}
	rows, retrieved, err := e.dataForSlowLog(ctx)
	if err != nil {
		return nil, err
	}
	if retrieved {
		return nil, nil
	}
	if len(e.outputDefCauss) == len(e.causet.DeferredCausets) {
		return rows, nil
	}
	// Project the full rows onto the requested output columns.
	retEvents := make([][]types.Causet, len(rows))
	for i, fullEvent := range rows {
		event := make([]types.Causet, len(e.outputDefCauss))
		for j, defCaus := range e.outputDefCauss {
			event[j] = fullEvent[defCaus.Offset]
		}
		retEvents[i] = event
	}
	return retEvents, nil
}

func (e *slowQueryRetriever) initialize(sctx stochastikctx.Context) error {
	var err error
	var hasProcessPriv bool
	if pm := privilege.GetPrivilegeManager(sctx); pm != nil {
		hasProcessPriv = pm.RequestVerification(sctx.GetStochastikVars().ActiveRoles, "", "", "", allegrosql.ProcessPriv)
	}
	e.checker = &slowLogChecker{
		hasProcessPriv: hasProcessPriv,
		user:           sctx.GetStochastikVars().User,
	}
	if e.extractor != nil {
		e.checker.enableTimeCheck = e.extractor.Enable
		e.checker.startTime = types.NewTime(types.FromGoTime(e.extractor.StartTime), allegrosql.TypeDatetime, types.MaxFsp)
		e.checker.endTime = types.NewTime(types.FromGoTime(e.extractor.EndTime), allegrosql.TypeDatetime, types.MaxFsp)
	}
	e.initialized = true
	e.files, err = e.getAllFiles(sctx, sctx.GetStochastikVars().SlowQueryFile)
	return err
}
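// The retrieval pipeline is asynchronous: initialize discovers the slow log
// files and builds the privilege/time checker, initializeAsyncParsing starts
// a background goroutine that batches raw lines (getBatchLog) and fans the
// batches out to parseLog workers, and the parsed rows flow back to the
// caller through parsedSlowLogCh via dataForSlowLog.
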
func (e *slowQueryRetriever) close() error {
	for _, f := range e.files {
		err := f.file.Close()
		if err != nil {
			logutil.BgLogger().Error("close slow log file failed.", zap.Error(err))
		}
	}
	return nil
}

type parsedSlowLog struct {
	rows [][]types.Causet
	err  error
}

func (e *slowQueryRetriever) parseDataForSlowLog(ctx context.Context, sctx stochastikctx.Context) {
	if len(e.files) == 0 {
		close(e.parsedSlowLogCh)
		return
	}
	reader := bufio.NewReader(e.files[0].file)
	e.parseSlowLog(ctx, sctx, reader, 64)
	close(e.parsedSlowLogCh)
}

func (e *slowQueryRetriever) dataForSlowLog(ctx context.Context) ([][]types.Causet, bool, error) {
	var (
		slowLog parsedSlowLog
		ok      bool
	)
	for {
		select {
		case slowLog, ok = <-e.parsedSlowLogCh:
		case <-ctx.Done():
			return nil, false, ctx.Err()
		}
		if !ok {
			// The channel is closed: all slow log data has been parsed.
			return nil, true, nil
		}
		rows, err := slowLog.rows, slowLog.err
		if err != nil {
			return nil, false, err
		}
		if len(rows) == 0 {
			continue
		}
		if e.causet.Name.L == strings.ToLower(schemareplicant.ClusterBlockSlowLog) {
			rows, err := schemareplicant.AppendHostInfoToEvents(rows)
			return rows, false, err
		}
		return rows, false, nil
	}
}

type slowLogChecker struct {
	// The fields below are used to check privileges.
	hasProcessPriv bool
	user           *auth.UserIdentity
	// The fields below are used to check whether the slow log time is within the queried range.
	enableTimeCheck bool
	startTime       types.Time
	endTime         types.Time
}

func (sc *slowLogChecker) hasPrivilege(userName string) bool {
	return sc.hasProcessPriv || sc.user == nil || userName == sc.user.Username
}

func (sc *slowLogChecker) isTimeValid(t types.Time) bool {
	if sc.enableTimeCheck && (t.Compare(sc.startTime) < 0 || t.Compare(sc.endTime) > 0) {
		return false
	}
	return true
}

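// getOneLine reads one logical line from the reader. bufio.Reader.ReadLine
// returns at most one buffer's worth of data per call and sets isPrefix when a
// line is split across reads, so over-long lines are reassembled here and
// capped at variable.MaxOfMaxAllowedPacket bytes.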
func getOneLine(reader *bufio.Reader) ([]byte, error) {
	var resByte []byte
	lineByte, isPrefix, err := reader.ReadLine()
	if isPrefix {
		// Need to read more data.
		resByte = make([]byte, len(lineByte), len(lineByte)*2)
	} else {
		resByte = make([]byte, len(lineByte))
	}
	// Copy so that the returned slice does not alias the reader's internal buffer.
	copy(resByte, lineByte)
	if err != nil {
		return resByte, err
	}

	var tempLine []byte
	for isPrefix {
		tempLine, isPrefix, err = reader.ReadLine()
		resByte = append(resByte, tempLine...)
		// Use the max value of max_allowed_packet to check the single line length.
		if len(resByte) > int(variable.MaxOfMaxAllowedPacket) {
			return resByte, errors.Errorf("single line length exceeds limit: %v", variable.MaxOfMaxAllowedPacket)
		}
		if err != nil {
			return resByte, err
		}
	}
	return resByte, err
}

// offset records where a batch of lines starts within the current file, so
// that parse errors can report accurate line numbers (see getLineIndex).
type offset struct {
	offset int
	length int
}

// getBatchLog reads up to num complete slow log entries, transparently
// switching to the next file on EOF.
func (e *slowQueryRetriever) getBatchLog(reader *bufio.Reader, offset *offset, num int) ([]string, error) {
	var line string
	log := make([]string, 0, num)
	for i := 0; i < num; i++ {
		for {
			e.fileLine++
			lineByte, err := getOneLine(reader)
			if err != nil {
				if err == io.EOF {
					e.fileIdx++
					e.fileLine = 0
					if e.fileIdx >= len(e.files) {
						return log, nil
					}
					offset.length = len(log)
					reader.Reset(e.files[e.fileIdx].file)
					continue
				}
				return log, err
			}
			line = string(replog.String(lineByte))
			log = append(log, line)
			if strings.HasSuffix(line, variable.SlowLogALLEGROSQLSuffixStr) {
				if strings.HasPrefix(line, "use") {
					continue
				}
				break
			}
		}
	}
	return log, nil
}

func (e *slowQueryRetriever) parseSlowLog(ctx context.Context, sctx stochastikctx.Context, reader *bufio.Reader, logNum int) {
	var wg sync.WaitGroup
	offset := offset{offset: 0, length: 0}
	// ch limits the number of goroutines that parse log batches concurrently.
	ch := make(chan int, sctx.GetStochastikVars().Concurrency.DistALLEGROSQLScanConcurrency())
	defer close(ch)
loop:
	for {
		log, err := e.getBatchLog(reader, &offset, logNum)
		if err != nil {
			e.parsedSlowLogCh <- parsedSlowLog{nil, err}
			break
		}
		start := offset
		wg.Add(1)
		ch <- 1
		go func() {
			defer wg.Done()
			result, err := e.parseLog(sctx, log, start)
			if err != nil {
				e.parsedSlowLogCh <- parsedSlowLog{nil, err}
			} else {
				e.parsedSlowLogCh <- parsedSlowLog{result, err}
			}
			<-ch
		}()
		// Stop if all files have been consumed; otherwise record where the
		// next batch starts.
		if e.fileIdx >= len(e.files) {
			break
		}
		offset.offset = e.fileLine
		offset.length = 0
		select {
		case <-ctx.Done():
			// A bare break here would only exit the select, so break the loop
			// via its label.
			break loop
		default:
		}
	}
	wg.Wait()
}

// getLineIndex maps a batch-relative index to a 1-based line number. Lines
// with index < offset.length were read from the previous file (the batch
// started at offset.offset within it); later lines belong to the newly opened
// file. For example, with offset{offset: 120, length: 5}, index 2 is line 123
// of the old file and index 7 is line 3 of the new file.
func getLineIndex(offset offset, index int) int {
	var fileLine int
	if offset.length <= index {
		fileLine = index - offset.length + 1
	} else {
		fileLine = offset.offset + index + 1
	}
	return fileLine
}

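// parseLog scans a batch of lines for complete slow log entries and converts
// each entry into a causet event. A minimal sketch of the entry shape it
// expects (the field names are illustrative; the exact strings come from the
// variable.SlowLog* constants):
//
//	# Time: 2020-05-06T16:35:22.123456+08:00
//	# Txn_start_ts: 416406734813429765
//	# User@Host: root[root] @ localhost [127.0.0.1]
//	# Query_time: 1.2
//	use test;
//	select * from t;
//
// A line with the start prefix opens an entry, event-prefixed lines carry
// key/value fields, and a line ending in variable.SlowLogALLEGROSQLSuffixStr
// closes the entry and yields its query text.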
func (e *slowQueryRetriever) parseLog(ctx stochastikctx.Context, log []string, offset offset) (data [][]types.Causet, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("%s", r)
		}
	}()
	failpoint.Inject("errorMockParseSlowLogPanic", func(val failpoint.Value) {
		if val.(bool) {
			panic("panic test")
		}
	})
	var st *slowQueryTuple
	tz := ctx.GetStochastikVars().Location()
	startFlag := false
	for index, line := range log {
		fileLine := getLineIndex(offset, index)
		if !startFlag && strings.HasPrefix(line, variable.SlowLogStartPrefixStr) {
			st = &slowQueryTuple{}
			valid, err := st.setFieldValue(tz, variable.SlowLogTimeStr, line[len(variable.SlowLogStartPrefixStr):], fileLine, e.checker)
			if err != nil {
				ctx.GetStochastikVars().StmtCtx.AppendWarning(err)
				continue
			}
			if valid {
				startFlag = true
			}
			continue
		}
		if startFlag {
			if strings.HasPrefix(line, variable.SlowLogEventPrefixStr) {
				line = line[len(variable.SlowLogEventPrefixStr):]
				if strings.HasPrefix(line, variable.SlowLogPrevStmtPrefix) {
					st.prevStmt = line[len(variable.SlowLogPrevStmtPrefix):]
				} else if strings.HasPrefix(line, variable.SlowLogUserAndHostStr+variable.SlowLogSpaceMarkStr) {
					value := line[len(variable.SlowLogUserAndHostStr+variable.SlowLogSpaceMarkStr):]
					valid, err := st.setFieldValue(tz, variable.SlowLogUserAndHostStr, value, fileLine, e.checker)
					if err != nil {
						ctx.GetStochastikVars().StmtCtx.AppendWarning(err)
						continue
					}
					if !valid {
						startFlag = false
					}
				} else {
					// Generic `# Field: value Field: value ...` lines.
					fieldValues := strings.Split(line, " ")
					for i := 0; i < len(fieldValues)-1; i += 2 {
						field := fieldValues[i]
						if strings.HasSuffix(field, ":") {
							field = field[:len(field)-1]
						}
						valid, err := st.setFieldValue(tz, field, fieldValues[i+1], fileLine, e.checker)
						if err != nil {
							ctx.GetStochastikVars().StmtCtx.AppendWarning(err)
							continue
						}
						if !valid {
							startFlag = false
						}
					}
				}
			} else if strings.HasSuffix(line, variable.SlowLogALLEGROSQLSuffixStr) {
				if strings.HasPrefix(line, "use") {
					// `use EDB` statements are written to the slow log for
					// compatibility with MyALLEGROSQL. Since the current EDB is
					// already taken from the `# EDB` field, they can be ignored
					// here; see https://github.com/whtcorpsinc/milevadb/issues/17846
					// for details.
					continue
				}
				// Get the allegrosql string and reset the start flag.
				_, err := st.setFieldValue(tz, variable.SlowLogQueryALLEGROSQLStr, string(replog.Slice(line)), fileLine, e.checker)
				if err != nil {
					ctx.GetStochastikVars().StmtCtx.AppendWarning(err)
					continue
				}
				if e.checker.hasPrivilege(st.user) {
					data = append(data, st.convertToCausetEvent())
				}
				startFlag = false
			} else {
				startFlag = false
			}
		}
	}
	return data, nil
}

// slowQueryTuple holds all fields parsed from a single slow log entry.
type slowQueryTuple struct {
	time                   types.Time
	txnStartTs             uint64
	user                   string
	host                   string
	connID                 uint64
	execRetryCount         uint64
	execRetryTime          float64
	queryTime              float64
	parseTime              float64
	compileTime            float64
	rewriteTime            float64
	preprocSubqueries      uint64
	preprocSubQueryTime    float64
	optimizeTime           float64
	waitTSTime             float64
	preWriteTime           float64
	waitPrewriteBinlogTime float64
	commitTime             float64
	getCommitTSTime        float64
	commitBackoffTime      float64
	backoffTypes           string
	resolveLockTime        float64
	localLatchWaitTime     float64
	writeKeys              uint64
	writeSize              uint64
	prewriteRegion         uint64
	txnRetry               uint64
	copTime                float64
	processTime            float64
	waitTime               float64
	backOffTime            float64
	lockKeysTime           float64
	requestCount           uint64
	totalKeys              uint64
	processKeys            uint64
	EDB                    string
	indexIDs               string
	digest                 string
	statsInfo              string
	avgProcessTime         float64
	p90ProcessTime         float64
	maxProcessTime         float64
	maxProcessAddress      string
	avgWaitTime            float64
	p90WaitTime            float64
	maxWaitTime            float64
	maxWaitAddress         string
	memMax                 int64
	diskMax                int64
	prevStmt               string
	allegrosql             string
	isInternal             bool
	succ                   bool
	planFromCache          bool
	plan                   string
	planDigest             string
}

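// setFieldValue parses one field of a slow log entry into st. It returns
// valid=false when the checker rejects the entry (wrong user or out-of-range
// time), and a non-nil error when the raw value cannot be parsed.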
func (st *slowQueryTuple) setFieldValue(tz *time.Location, field, value string, lineNum int, checker *slowLogChecker) (valid bool, err error) {
	valid = true
	switch field {
	case variable.SlowLogTimeStr:
		var t time.Time
		t, err = ParseTime(value)
		if err != nil {
			break
		}
		st.time = types.NewTime(types.FromGoTime(t), allegrosql.TypeTimestamp, types.MaxFsp)
		if checker != nil {
			valid = checker.isTimeValid(st.time)
		}
		if t.Location() != tz {
			t = t.In(tz)
			st.time = types.NewTime(types.FromGoTime(t), allegrosql.TypeTimestamp, types.MaxFsp)
		}
	case variable.SlowLogTxnStartTSStr:
		st.txnStartTs, err = strconv.ParseUint(value, 10, 64)
	case variable.SlowLogUserStr:
		// The old User format is kept for compatibility.
		fields := strings.SplitN(value, "@", 2)
		if len(fields) > 0 {
			st.user = fields[0]
		}
		if len(fields) > 1 {
			st.host = fields[1]
		}
		if checker != nil {
			valid = checker.hasPrivilege(st.user)
		}
	case variable.SlowLogUserAndHostStr:
		// The new User&Host format: root[root] @ localhost [127.0.0.1]
		fields := strings.SplitN(value, "@", 2)
		if len(fields) > 0 {
			tmp := strings.Split(fields[0], "[")
			st.user = strings.TrimSpace(tmp[0])
		}
		if len(fields) > 1 {
			tmp := strings.Split(fields[1], "[")
			st.host = strings.TrimSpace(tmp[0])
		}
		if checker != nil {
			valid = checker.hasPrivilege(st.user)
		}
	case variable.SlowLogConnIDStr:
		st.connID, err = strconv.ParseUint(value, 10, 64)
	case variable.SlowLogInterDircRetryCount:
		st.execRetryCount, err = strconv.ParseUint(value, 10, 64)
	case variable.SlowLogInterDircRetryTime:
		st.execRetryTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogQueryTimeStr:
		st.queryTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogParseTimeStr:
		st.parseTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogCompileTimeStr:
		st.compileTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogOptimizeTimeStr:
		st.optimizeTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogWaitTSTimeStr:
		st.waitTSTime, err = strconv.ParseFloat(value, 64)
	case execdetails.PreWriteTimeStr:
		st.preWriteTime, err = strconv.ParseFloat(value, 64)
	case execdetails.WaitPrewriteBinlogTimeStr:
		st.waitPrewriteBinlogTime, err = strconv.ParseFloat(value, 64)
	case execdetails.CommitTimeStr:
		st.commitTime, err = strconv.ParseFloat(value, 64)
	case execdetails.GetCommitTSTimeStr:
		st.getCommitTSTime, err = strconv.ParseFloat(value, 64)
	case execdetails.CommitBackoffTimeStr:
		st.commitBackoffTime, err = strconv.ParseFloat(value, 64)
	case execdetails.BackoffTypesStr:
		st.backoffTypes = value
	case execdetails.ResolveLockTimeStr:
		st.resolveLockTime, err = strconv.ParseFloat(value, 64)
	case execdetails.LocalLatchWaitTimeStr:
		st.localLatchWaitTime, err = strconv.ParseFloat(value, 64)
	case execdetails.WriteKeysStr:
		st.writeKeys, err = strconv.ParseUint(value, 10, 64)
	case execdetails.WriteSizeStr:
		st.writeSize, err = strconv.ParseUint(value, 10, 64)
	case execdetails.PrewriteRegionStr:
		st.prewriteRegion, err = strconv.ParseUint(value, 10, 64)
	case execdetails.TxnRetryStr:
		st.txnRetry, err = strconv.ParseUint(value, 10, 64)
	case execdetails.CopTimeStr:
		st.copTime, err = strconv.ParseFloat(value, 64)
	case execdetails.ProcessTimeStr:
		st.processTime, err = strconv.ParseFloat(value, 64)
	case execdetails.WaitTimeStr:
		st.waitTime, err = strconv.ParseFloat(value, 64)
	case execdetails.BackoffTimeStr:
		st.backOffTime, err = strconv.ParseFloat(value, 64)
	case execdetails.LockKeysTimeStr:
		st.lockKeysTime, err = strconv.ParseFloat(value, 64)
	case execdetails.RequestCountStr:
		st.requestCount, err = strconv.ParseUint(value, 10, 64)
	case execdetails.TotalKeysStr:
		st.totalKeys, err = strconv.ParseUint(value, 10, 64)
	case execdetails.ProcessKeysStr:
		st.processKeys, err = strconv.ParseUint(value, 10, 64)
	case variable.SlowLogDBStr:
		st.EDB = value
	case variable.SlowLogIndexNamesStr:
		st.indexIDs = value
	case variable.SlowLogIsInternalStr:
		st.isInternal = value == "true"
	case variable.SlowLogDigestStr:
		st.digest = value
	case variable.SlowLogStatsInfoStr:
		st.statsInfo = value
	case variable.SlowLogCopProcAvg:
		st.avgProcessTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogCopProcP90:
		st.p90ProcessTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogCopProcMax:
		st.maxProcessTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogCopProcAddr:
		st.maxProcessAddress = value
	case variable.SlowLogCopWaitAvg:
		st.avgWaitTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogCopWaitP90:
		st.p90WaitTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogCopWaitMax:
		st.maxWaitTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogCopWaitAddr:
		st.maxWaitAddress = value
	case variable.SlowLogMemMax:
		st.memMax, err = strconv.ParseInt(value, 10, 64)
	case variable.SlowLogSucc:
		st.succ, err = strconv.ParseBool(value)
	case variable.SlowLogCausetFromCache:
		st.planFromCache, err = strconv.ParseBool(value)
	case variable.SlowLogCauset:
		st.plan = value
	case variable.SlowLogCausetDigest:
		st.planDigest = value
	case variable.SlowLogQueryALLEGROSQLStr:
		st.allegrosql = value
	case variable.SlowLogDiskMax:
		st.diskMax, err = strconv.ParseInt(value, 10, 64)
	case variable.SlowLogRewriteTimeStr:
		st.rewriteTime, err = strconv.ParseFloat(value, 64)
	case variable.SlowLogPreprocSubQueriesStr:
		st.preprocSubqueries, err = strconv.ParseUint(value, 10, 64)
	case variable.SlowLogPreProcSubQueryTimeStr:
		st.preprocSubQueryTime, err = strconv.ParseFloat(value, 64)
	}
	if err != nil {
		return valid, fmt.Errorf("parse slow log at line %d failed, field: `%s`, error: %v", lineNum, field, err)
	}
	return valid, err
}

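// convertToCausetEvent flattens the parsed tuple into one causet per column.
// The append order below is fixed and must match the column order of the slow
// query causet definition.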
func (st *slowQueryTuple) convertToCausetEvent() []types.Causet {
	// Build the slow query result.
	record := make([]types.Causet, 0, 64)
	record = append(record, types.NewTimeCauset(st.time))
	record = append(record, types.NewUintCauset(st.txnStartTs))
	record = append(record, types.NewStringCauset(st.user))
	record = append(record, types.NewStringCauset(st.host))
	record = append(record, types.NewUintCauset(st.connID))
	record = append(record, types.NewUintCauset(st.execRetryCount))
	record = append(record, types.NewFloat64Causet(st.execRetryTime))
	record = append(record, types.NewFloat64Causet(st.queryTime))
	record = append(record, types.NewFloat64Causet(st.parseTime))
	record = append(record, types.NewFloat64Causet(st.compileTime))
	record = append(record, types.NewFloat64Causet(st.rewriteTime))
	record = append(record, types.NewUintCauset(st.preprocSubqueries))
	record = append(record, types.NewFloat64Causet(st.preprocSubQueryTime))
	record = append(record, types.NewFloat64Causet(st.optimizeTime))
	record = append(record, types.NewFloat64Causet(st.waitTSTime))
	record = append(record, types.NewFloat64Causet(st.preWriteTime))
	record = append(record, types.NewFloat64Causet(st.waitPrewriteBinlogTime))
	record = append(record, types.NewFloat64Causet(st.commitTime))
	record = append(record, types.NewFloat64Causet(st.getCommitTSTime))
	record = append(record, types.NewFloat64Causet(st.commitBackoffTime))
	record = append(record, types.NewStringCauset(st.backoffTypes))
	record = append(record, types.NewFloat64Causet(st.resolveLockTime))
	record = append(record, types.NewFloat64Causet(st.localLatchWaitTime))
	record = append(record, types.NewUintCauset(st.writeKeys))
	record = append(record, types.NewUintCauset(st.writeSize))
	record = append(record, types.NewUintCauset(st.prewriteRegion))
	record = append(record, types.NewUintCauset(st.txnRetry))
	record = append(record, types.NewFloat64Causet(st.copTime))
	record = append(record, types.NewFloat64Causet(st.processTime))
	record = append(record, types.NewFloat64Causet(st.waitTime))
	record = append(record, types.NewFloat64Causet(st.backOffTime))
	record = append(record, types.NewFloat64Causet(st.lockKeysTime))
	record = append(record, types.NewUintCauset(st.requestCount))
	record = append(record, types.NewUintCauset(st.totalKeys))
	record = append(record, types.NewUintCauset(st.processKeys))
	record = append(record, types.NewStringCauset(st.EDB))
	record = append(record, types.NewStringCauset(st.indexIDs))
	record = append(record, types.NewCauset(st.isInternal))
	record = append(record, types.NewStringCauset(st.digest))
	record = append(record, types.NewStringCauset(st.statsInfo))
	record = append(record, types.NewFloat64Causet(st.avgProcessTime))
	record = append(record, types.NewFloat64Causet(st.p90ProcessTime))
	record = append(record, types.NewFloat64Causet(st.maxProcessTime))
	record = append(record, types.NewStringCauset(st.maxProcessAddress))
	record = append(record, types.NewFloat64Causet(st.avgWaitTime))
	record = append(record, types.NewFloat64Causet(st.p90WaitTime))
	record = append(record, types.NewFloat64Causet(st.maxWaitTime))
	record = append(record, types.NewStringCauset(st.maxWaitAddress))
	record = append(record, types.NewIntCauset(st.memMax))
	record = append(record, types.NewIntCauset(st.diskMax))
	if st.succ {
		record = append(record, types.NewIntCauset(1))
	} else {
		record = append(record, types.NewIntCauset(0))
	}
	if st.planFromCache {
		record = append(record, types.NewIntCauset(1))
	} else {
		record = append(record, types.NewIntCauset(0))
	}
	record = append(record, types.NewStringCauset(parseCauset(st.plan)))
	record = append(record, types.NewStringCauset(st.planDigest))
	record = append(record, types.NewStringCauset(st.prevStmt))
	record = append(record, types.NewStringCauset(st.allegrosql))
	return record
}

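// parseCauset strips the plan prefix/suffix markers and decodes the encoded
// plan via plancodec. On decode failure the raw encoded string is returned and
// the error is only logged, so a corrupt plan never fails the whole query.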
func parseCauset(planString string) string {
	if len(planString) <= len(variable.SlowLogCausetPrefix)+len(variable.SlowLogCausetSuffix) {
		return planString
	}
	planString = planString[len(variable.SlowLogCausetPrefix) : len(planString)-len(variable.SlowLogCausetSuffix)]
	decodeCausetString, err := plancodec.DecodeCauset(planString)
	if err == nil {
		planString = decodeCausetString
	} else {
		logutil.BgLogger().Error("decode plan in slow log failed", zap.String("plan", planString), zap.Error(err))
	}
	return planString
}

// ParseTime parses a slow log timestamp. It is exported for testing.
func ParseTime(s string) (time.Time, error) {
	t, err := time.Parse(logutil.SlowLogTimeFormat, s)
	if err != nil {
		// This is for compatibility with the old slow log time format.
		t, err = time.Parse(logutil.OldSlowLogTimeFormat, s)
		if err != nil {
			err = errors.Errorf("string %q doesn't match the expected time format %q, err: %v", s, logutil.SlowLogTimeFormat, err)
		}
	}
	return t, err
}

type logFile struct {
	file       *os.File  // The opened file handle
	start, end time.Time // The start/end time of the log file
}

// getAllFiles returns all slow log files that need to be parsed. When no time
// extractor is available it falls back to the single configured log file;
// otherwise it walks the log directory and keeps every rotated file whose
// time range overlaps the queried range.
func (e *slowQueryRetriever) getAllFiles(sctx stochastikctx.Context, logFilePath string) ([]logFile, error) {
	if e.extractor == nil || !e.extractor.Enable {
		file, err := os.Open(logFilePath)
		if err != nil {
			if os.IsNotExist(err) {
				return nil, nil
			}
			return nil, err
		}
		return []logFile{{file: file}}, nil
	}
	var logFiles []logFile
	logDir := filepath.Dir(logFilePath)
	ext := filepath.Ext(logFilePath)
	prefix := logFilePath[:len(logFilePath)-len(ext)]
	handleErr := func(err error) error {
		// Ignore the error and append a warning for usability.
		if err != io.EOF {
			sctx.GetStochastikVars().StmtCtx.AppendWarning(err)
		}
		return nil
	}
	err := filepath.Walk(logDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return handleErr(err)
		}
		if info.IsDir() {
			return nil
		}
		// All rotated log files have the same prefix as the original file.
		if !strings.HasPrefix(path, prefix) {
			return nil
		}
		file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
		if err != nil {
			return handleErr(err)
		}
		skip := false
		defer func() {
			if !skip {
				terror.Log(file.Close())
			}
		}()
		// Get the file start time.
		fileStartTime, err := e.getFileStartTime(file)
		if err != nil {
			return handleErr(err)
		}
		start := types.NewTime(types.FromGoTime(fileStartTime), allegrosql.TypeDatetime, types.MaxFsp)
		if start.Compare(e.checker.endTime) > 0 {
			return nil
		}

		// Get the file end time.
		fileEndTime, err := e.getFileEndTime(file)
		if err != nil {
			return handleErr(err)
		}
		end := types.NewTime(types.FromGoTime(fileEndTime), allegrosql.TypeDatetime, types.MaxFsp)
		if end.Compare(e.checker.startTime) < 0 {
			return nil
		}
		_, err = file.Seek(0, io.SeekStart)
		if err != nil {
			return handleErr(err)
		}
		logFiles = append(logFiles, logFile{
			file:  file,
			start: fileStartTime,
			end:   fileEndTime,
		})
		skip = true
		return nil
	})
	// Sort by start time.
	sort.Slice(logFiles, func(i, j int) bool {
		return logFiles[i].start.Before(logFiles[j].start)
	})
	return logFiles, err
}

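// getFileStartTime seeks to the beginning of the file and scans at most 128
// lines for the first start-of-entry marker to determine the file's start
// time.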
func (e *slowQueryRetriever) getFileStartTime(file *os.File) (time.Time, error) {
	var t time.Time
	_, err := file.Seek(0, io.SeekStart)
	if err != nil {
		return t, err
	}
	reader := bufio.NewReader(file)
	maxNum := 128
	for {
		lineByte, err := getOneLine(reader)
		if err != nil {
			return t, err
		}
		line := string(lineByte)
		if strings.HasPrefix(line, variable.SlowLogStartPrefixStr) {
			return ParseTime(line[len(variable.SlowLogStartPrefixStr):])
		}
		maxNum -= 1
		if maxNum <= 0 {
			break
		}
	}
	return t, errors.Errorf("malformed slow query file %v", file.Name())
}

// getFileEndTime reads the file backwards from the end, byte by byte, and
// scans at most 128 lines for the last start-of-entry marker to determine the
// file's end time.
func (e *slowQueryRetriever) getFileEndTime(file *os.File) (time.Time, error) {
	var t time.Time
	stat, err := file.Stat()
	if err != nil {
		return t, err
	}
	fileSize := stat.Size()
	cursor := int64(0)
	line := make([]byte, 0, 64)
	maxLineNum := 128
	// Bytes are accumulated in reverse order, so reverse the line before
	// checking for the start-of-entry prefix.
	tryGetTime := func(line []byte) string {
		for i, j := 0, len(line)-1; i < j; i, j = i+1, j-1 {
			line[i], line[j] = line[j], line[i]
		}
		lineStr := string(line)
		lineStr = strings.TrimSpace(lineStr)
		if strings.HasPrefix(lineStr, variable.SlowLogStartPrefixStr) {
			return lineStr[len(variable.SlowLogStartPrefixStr):]
		}
		return ""
	}
	for {
		cursor -= 1
		_, err := file.Seek(cursor, io.SeekEnd)
		if err != nil {
			return t, err
		}

		char := make([]byte, 1)
		_, err = file.Read(char)
		if err != nil {
			return t, err
		}
		// A full line has been read (in reverse).
		if cursor != -1 && (char[0] == '\n' || char[0] == '\r') {
			if timeStr := tryGetTime(line); len(timeStr) > 0 {
				return ParseTime(timeStr)
			}
			line = line[:0]
			maxLineNum -= 1
		}
		line = append(line, char[0])
		if cursor == -fileSize || maxLineNum <= 0 {
			if timeStr := tryGetTime(line); len(timeStr) > 0 {
				return ParseTime(timeStr)
			}
			return t, errors.Errorf("malformed slow query file %v", file.Name())
		}
	}
}

// initializeAsyncParsing starts the background goroutine that parses the slow
// log files and feeds the results into parsedSlowLogCh.
func (e *slowQueryRetriever) initializeAsyncParsing(ctx context.Context, sctx stochastikctx.Context) {
	e.parsedSlowLogCh = make(chan parsedSlowLog, 100)
	go e.parseDataForSlowLog(ctx, sctx)
}
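// A hypothetical driver of this retriever (the real call sites live in the
// interlock builders) would look roughly like:
//
//	r := &slowQueryRetriever{causet: blockInfo, outputDefCauss: defcauss}
//	for {
//		rows, err := r.retrieve(ctx, sctx)
//		if err != nil || rows == nil { // nil rows: all data consumed
//			break
//		}
//		// consume rows ...
//	}
//	terror.Log(r.close())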