github.com/matrixorigin/matrixone@v0.7.0/pkg/frontend/query_result.go

// Copyright 2021 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package frontend

import (
	"context"
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/google/uuid"

	"github.com/matrixorigin/matrixone/pkg/catalog"
	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/mpool"
	"github.com/matrixorigin/matrixone/pkg/container/batch"
	"github.com/matrixorigin/matrixone/pkg/container/types"
	"github.com/matrixorigin/matrixone/pkg/container/vector"
	"github.com/matrixorigin/matrixone/pkg/defines"
	"github.com/matrixorigin/matrixone/pkg/fileservice"
	"github.com/matrixorigin/matrixone/pkg/objectio"
	"github.com/matrixorigin/matrixone/pkg/pb/plan"
	"github.com/matrixorigin/matrixone/pkg/sql/parsers/tree"
)

const queryResultPrefix = "%s_%s_"

func getQueryResultDir() string {
	return fileservice.JoinPath(defines.SharedFileServiceName, "/query_result")
}

func getPrefixOfQueryResultFile(accountName, statementId string) string {
	return fmt.Sprintf(queryResultPrefix, accountName, statementId)
}

func getPathOfQueryResultFile(fileName string) string {
	return fmt.Sprintf("%s/%s", getQueryResultDir(), fileName)
}

// openSaveQueryResult reports whether the result of the current statement
// should be saved; on the first block it also loads the per-query limits.
func openSaveQueryResult(ses *Session) bool {
	if ses.ast == nil || ses.tStmt == nil {
		return false
	}
	if ses.tStmt.SqlSourceType == "internal_sql" || isSimpleResultQuery(ses.ast) {
		return false
	}
	val, err := ses.GetGlobalVar("save_query_result")
	if err != nil {
		return false
	}
	if v, _ := val.(int8); v > 0 {
		if ses.blockIdx == 0 {
			if err = initQueryResulConfig(ses); err != nil {
				return false
			}
		}
		return true
	}
	return false
}

// initQueryResulConfig reads the result-size limit and computes the
// expiration time of the result that is about to be saved.
func initQueryResulConfig(ses *Session) error {
	val, err := ses.GetGlobalVar("query_result_maxsize")
	if err != nil {
		return err
	}
	switch v := val.(type) {
	case uint64:
		ses.limitResultSize = float64(v)
	case float64:
		ses.limitResultSize = v
	}
	var p uint64
	val, err = ses.GetGlobalVar("query_result_timeout")
	if err != nil {
		return err
	}
	switch v := val.(type) {
	case uint64:
		p = v
	case float64:
		p = uint64(v)
	}
	ses.createdTime = time.Now()
	ses.expiredTime = ses.createdTime.Add(time.Hour * time.Duration(p))
	return nil
}
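
// The globals consulted above control result saving: save_query_result
// toggles it, query_result_maxsize caps the accumulated result size in MB
// (compared against the batch sizes below), and query_result_timeout is the
// retention period in hours. Illustratively, assuming MatrixOne's
// MySQL-compatible SET GLOBAL syntax (values here are just examples):
//
//	set global save_query_result = on;
//	set global query_result_maxsize = 100;
//	set global query_result_timeout = 24;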

// isSimpleResultQuery reports whether ast is a plain SELECT over a single
// result_scan/meta_scan table function that projects only * or bare columns;
// the result of such a query is not saved again.
func isSimpleResultQuery(ast tree.Statement) bool {
	switch stmt := ast.(type) {
	case *tree.Select:
		if stmt.With != nil || stmt.OrderBy != nil || stmt.Ep != nil {
			return false
		}
		if clause, ok := stmt.Select.(*tree.SelectClause); ok {
			if len(clause.From.Tables) > 1 || clause.Where != nil || clause.Having != nil || len(clause.GroupBy) > 0 {
				return false
			}
			t := clause.From.Tables[0]
			// check the table expression
			if j, ok := t.(*tree.JoinTableExpr); ok {
				if j.Right != nil {
					return false
				}
				if a, ok := j.Left.(*tree.AliasedTableExpr); ok {
					if f, ok := a.Expr.(*tree.TableFunction); ok {
						if f.Id() != "result_scan" && f.Id() != "meta_scan" {
							return false
						}
						// check the projection list
						for _, selectExpr := range clause.Exprs {
							switch selectExpr.Expr.(type) {
							case tree.UnqualifiedStar:
								continue
							case *tree.UnresolvedName:
								continue
							default:
								return false
							}
						}
						return true
					}
					return false
				}
				return false
			}
			return false
		}
		return false
	case *tree.ParenSelect:
		return isSimpleResultQuery(stmt.Select)
	}
	return false
}

// saveQueryResult writes one result batch as a block object under the
// query-result directory; once the accumulated size would exceed the limit,
// further batches are silently dropped.
func saveQueryResult(ses *Session, bat *batch.Batch) error {
	s := ses.curResultSize + float64(bat.Size())/(1024*1024)
	if s > ses.limitResultSize {
		return nil
	}
	fs := ses.GetParameterUnit().FileService
	// write query result
	path := catalog.BuildQueryResultPath(ses.GetTenantInfo().GetTenant(), uuid.UUID(ses.tStmt.StatementID).String(), ses.GetIncBlockIdx())
	writer, err := objectio.NewObjectWriter(path, fs)
	if err != nil {
		return err
	}
	_, err = writer.Write(bat)
	if err != nil {
		return err
	}
	option := objectio.WriteOptions{
		Type: objectio.WriteTS,
		Val:  ses.expiredTime,
	}
	_, err = writer.WriteEnd(ses.requestCtx, option)
	if err != nil {
		return err
	}
	ses.curResultSize = s
	return nil
}

// saveQueryResultMeta writes the metadata object of the saved result
// (block paths, column definitions, plan, simplified AST, ...) and resets
// the per-query state of the session.
func saveQueryResultMeta(ses *Session) error {
	defer func() {
		ses.ResetBlockIdx()
		ses.p = nil
		ses.tStmt = nil
		ses.curResultSize = 0
	}()
	fs := ses.GetParameterUnit().FileService
	// write query result meta
	colMap := buildColumnMap(ses.rs)
	b, err := ses.rs.Marshal()
	if err != nil {
		return err
	}
	buf := new(strings.Builder)
	prefix := ",\n"
	for i := 1; i <= ses.blockIdx; i++ {
		if i > 1 {
			buf.WriteString(prefix)
		}
		buf.WriteString(catalog.BuildQueryResultPath(ses.GetTenantInfo().GetTenant(), uuid.UUID(ses.tStmt.StatementID).String(), i))
	}

	sp, err := ses.p.Marshal()
	if err != nil {
		return err
	}
	st, err := simpleAstMarshal(ses.ast)
	if err != nil {
		return err
	}
	m := &catalog.Meta{
		QueryId:     ses.tStmt.StatementID,
		Statement:   ses.tStmt.Statement,
		AccountId:   ses.GetTenantInfo().GetTenantID(),
		RoleId:      ses.tStmt.RoleId,
		ResultPath:  buf.String(),
		CreateTime:  types.UnixToTimestamp(ses.createdTime.Unix()),
		ResultSize:  ses.curResultSize,
		Columns:     string(b),
		Tables:      getTablesFromPlan(ses.p),
		UserId:      ses.GetTenantInfo().GetUserID(),
		ExpiredTime: types.UnixToTimestamp(ses.expiredTime.Unix()),
		Plan:        string(sp),
		Ast:         string(st),
		ColumnMap:   colMap,
	}
	metaBat, err := buildQueryResultMetaBatch(m, ses.mp)
	if err != nil {
		return err
	}
	metaPath := catalog.BuildQueryResultMetaPath(ses.GetTenantInfo().GetTenant(), uuid.UUID(ses.tStmt.StatementID).String())
	metaWriter, err := objectio.NewObjectWriter(metaPath, fs)
	if err != nil {
		return err
	}
	_, err = metaWriter.Write(metaBat)
	if err != nil {
		return err
	}
	option := objectio.WriteOptions{
		Type: objectio.WriteTS,
		Val:  ses.expiredTime,
	}
	_, err = metaWriter.WriteEnd(ses.requestCtx, option)
	if err != nil {
		return err
	}
	return nil
}
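
// buildColumnMap disambiguates duplicated result column names and records how
// the original names map to the final ones. For example, two result columns
// both named "a", the second coming from table "t1", end up as "a_0" and
// "a_1", and the returned mapping reads "a -> a_0, t1.a -> a_1".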
func buildColumnMap(rs *plan.ResultColDef) string {
	m := make(map[string][]int)
	org := make([]string, len(rs.ResultCols))
	for i, col := range rs.ResultCols {
		org[i] = col.Name
		v := m[col.Name]
		m[col.Name] = append(v, i)
	}
	for _, v := range m {
		if len(v) > 1 {
			for i := range v {
				rs.ResultCols[v[i]].Name = fmt.Sprintf("%s_%d", rs.ResultCols[v[i]].Name, i)
			}
		}
	}
	buf := new(strings.Builder)
	for i := range org {
		if i > 0 {
			buf.WriteString(", ")
		}
		if len(rs.ResultCols[i].Typ.Table) > 0 {
			buf.WriteString(fmt.Sprintf("%s.%s -> %s", rs.ResultCols[i].Typ.Table, org[i], rs.ResultCols[i].Name))
		} else {
			buf.WriteString(fmt.Sprintf("%s -> %s", org[i], rs.ResultCols[i].Name))
		}
	}
	return buf.String()
}

// isResultQuery collects the query ids referenced by result_scan/meta_scan
// nodes in the plan; it returns nil when the plan reads no saved result.
func isResultQuery(p *plan.Plan) []string {
	var uuids []string = nil
	if q, ok := p.Plan.(*plan.Plan_Query); ok {
		for _, n := range q.Query.Nodes {
			if n.NodeType == plan.Node_EXTERNAL_SCAN {
				if n.TableDef.TableType == "query_result" {
					uuids = append(uuids, n.TableDef.Name)
				}
			} else if n.NodeType == plan.Node_FUNCTION_SCAN {
				if n.TableDef.TblFunc.Name == "meta_scan" {
					uuids = append(uuids, n.TableDef.Name)
				}
			}
		}
	}
	return uuids
}

// checkPrivilege verifies that the current user may read the saved results
// identified by uuids, using the plan and the simplified AST stored in each
// result's meta object.
func checkPrivilege(uuids []string, requestCtx context.Context, ses *Session) error {
	f := ses.GetParameterUnit().FileService
	fs := objectio.NewObjectFS(f, catalog.QueryResultMetaDir)
	dirs, err := fs.ListDir(catalog.QueryResultMetaDir)
	if err != nil {
		return err
	}
	for _, id := range uuids {
		var size int64 = -1
		name := catalog.BuildQueryResultMetaName(ses.GetTenantInfo().GetTenant(), id)
		for _, d := range dirs {
			if d.Name == name {
				size = d.Size
			}
		}
		if size == -1 {
			return moerr.NewQueryIdNotFound(requestCtx, id)
		}
		path := catalog.BuildQueryResultMetaPath(ses.GetTenantInfo().GetTenant(), id)
		reader, err := objectio.NewObjectReader(path, f)
		if err != nil {
			return err
		}
		bs, err := reader.ReadAllMeta(requestCtx, size, ses.mp)
		if err != nil {
			return err
		}
		idxs := []uint16{catalog.PLAN_IDX, catalog.AST_IDX}
		iov, err := reader.Read(requestCtx, bs[0].GetExtent(), idxs, ses.mp)
		if err != nil {
			return err
		}
		bat := batch.NewWithSize(len(idxs))
		for i, e := range iov.Entries {
			bat.Vecs[i] = vector.New(catalog.MetaColTypes[idxs[i]])
			if err = bat.Vecs[i].Read(e.Object.([]byte)); err != nil {
				return err
			}
		}
		p := vector.MustStrCols(bat.Vecs[0])[0]
		pn := &plan.Plan{}
		if err = pn.Unmarshal([]byte(p)); err != nil {
			return err
		}
		a := vector.MustStrCols(bat.Vecs[1])[0]
		var ast tree.Statement
		if ast, err = simpleAstUnmarshal([]byte(a)); err != nil {
			return err
		}
		if err = authenticateCanExecuteStatementAndPlan(requestCtx, ses, ast, pn); err != nil {
			return err
		}
	}
	return nil
}

type simpleAst struct {
	Typ int `json:"age"`
	// extend with whatever else determinePrivilegeSetOfStatement needs
}

type astType int

const (
	astShowNone astType = iota
	astSelect
	astShowAboutTable
	astExplain
	astValues
	astExecute
)
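
// simpleAstMarshal/simpleAstUnmarshal persist only this coarse statement type,
// which is all the later privilege re-check needs. For example, a SELECT is
// stored as the JSON {"age":1} (astSelect) and round-trips back to an empty
// *tree.Select.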
func simpleAstMarshal(stmt tree.Statement) ([]byte, error) {
	s := simpleAst{}
	switch stmt.(type) {
	case *tree.Select:
		s.Typ = int(astSelect)
	case *tree.ShowTables, *tree.ShowCreateTable, *tree.ShowColumns, *tree.ShowCreateView, *tree.ShowCreateDatabase:
		s.Typ = int(astShowAboutTable)
	case *tree.ShowProcessList, *tree.ShowErrors, *tree.ShowWarnings, *tree.ShowVariables,
		*tree.ShowStatus, *tree.ShowTarget, *tree.ShowTableStatus,
		*tree.ShowGrants, *tree.ShowCollation, *tree.ShowIndex,
		*tree.ShowTableNumber, *tree.ShowColumnNumber,
		*tree.ShowTableValues, *tree.ShowNodeList,
		*tree.ShowLocks, *tree.ShowFunctionStatus:
		s.Typ = int(astShowNone)
	case *tree.ExplainFor, *tree.ExplainAnalyze, *tree.ExplainStmt:
		s.Typ = int(astExplain)
	case *tree.Execute:
		s.Typ = int(astExecute)
	case *tree.ValuesStatement:
		s.Typ = int(astValues)
	default:
		s.Typ = int(astShowNone)
	}
	return json.Marshal(s)
}

func simpleAstUnmarshal(b []byte) (tree.Statement, error) {
	s := &simpleAst{}
	if err := json.Unmarshal(b, s); err != nil {
		return nil, err
	}
	var stmt tree.Statement
	switch astType(s.Typ) {
	case astSelect:
		stmt = &tree.Select{}
	case astShowAboutTable:
		stmt = &tree.ShowTables{}
	case astShowNone:
		stmt = &tree.ShowStatus{}
	case astExplain:
		stmt = &tree.ExplainFor{}
	case astExecute:
		stmt = &tree.Execute{}
	case astValues:
		stmt = &tree.ValuesStatement{}
	}
	return stmt, nil
}

func getTablesFromPlan(p *plan.Plan) string {
	if p == nil {
		return ""
	}
	buf := new(strings.Builder)
	cnt := 0
	if q, ok := p.Plan.(*plan.Plan_Query); ok {
		for _, n := range q.Query.Nodes {
			if n.NodeType == plan.Node_EXTERNAL_SCAN || n.NodeType == plan.Node_TABLE_SCAN {
				if cnt > 0 {
					buf.WriteString(", ")
				}
				buf.WriteString(n.TableDef.Name)
				cnt++
			}
		}
	}
	return buf.String()
}

func buildQueryResultMetaBatch(m *catalog.Meta, mp *mpool.MPool) (*batch.Batch, error) {
	var err error
	bat := batch.NewWithSize(len(catalog.MetaColTypes))
	bat.SetAttributes(catalog.MetaColNames)
	for i, t := range catalog.MetaColTypes {
		bat.Vecs[i] = vector.New(t)
	}
	if err = bat.Vecs[catalog.QUERY_ID_IDX].Append(types.Uuid(m.QueryId), false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.STATEMENT_IDX].Append([]byte(m.Statement), false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.ACCOUNT_ID_IDX].Append(m.AccountId, false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.ROLE_ID_IDX].Append(m.RoleId, false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.RESULT_PATH_IDX].Append([]byte(m.ResultPath), false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.CREATE_TIME_IDX].Append(m.CreateTime, false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.RESULT_SIZE_IDX].Append(m.ResultSize, false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.COLUMNS_IDX].Append([]byte(m.Columns), false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.TABLES_IDX].Append([]byte(m.Tables), false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.USER_ID_IDX].Append(m.UserId, false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.EXPIRED_TIME_IDX].Append(m.ExpiredTime, false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.PLAN_IDX].Append([]byte(m.Plan), false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.AST_IDX].Append([]byte(m.Ast), false, mp); err != nil {
		return nil, err
	}
	if err = bat.Vecs[catalog.COLUMN_MAP_IDX].Append([]byte(m.ColumnMap), false, mp); err != nil {
		return nil, err
	}
	return bat, nil
}
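
// On disk a saved query therefore consists of two kinds of objects, which the
// readers below assume: a single-row meta object whose columns follow
// catalog.MetaColTypes (query id, statement, account id, role id, result path
// list, create time, result size, column definitions, tables, user id,
// expired time, plan, ast, column map), plus one block object per saved batch
// named along the lines of <account>_<statement-uuid>_<blockIndex>.blk under
// the shared /query_result directory (see getResultFiles).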

// resultFileInfo holds the info of the result file
type resultFileInfo struct {
	// the name of the result file
	name string
	// the size of the result file
	size int64
	// the block id of the result file
	blockIndex int64
}

// doDumpQueryResult reads data from the query result, converts it into csv and saves it into
// the file designated by the path.
func doDumpQueryResult(ctx context.Context, ses *Session, eParam *tree.ExportParam) error {
	var err error
	var columnDefs *plan.ResultColDef
	var reader objectio.Reader
	var blocks []objectio.BlockObject
	var files []resultFileInfo

	//step1: open file handler
	if columnDefs, err = openResultMeta(ctx, ses, eParam.QueryId); err != nil {
		return err
	}

	if files, err = getResultFiles(ctx, ses, eParam.QueryId); err != nil {
		return err
	}

	//step2: read every batch from the query result
	indexes := make([]uint16, len(columnDefs.ResultCols))
	for i := range indexes {
		indexes[i] = uint16(i)
	}
	//=====================
	// preparation
	//=====================
	//prepare batch
	tmpBatch := batch.NewWithSize(len(columnDefs.ResultCols))
	defer tmpBatch.Clean(ses.GetMemPool())
	//prepare result set
	mrs := &MysqlResultSet{}
	typs := make([]types.Type, len(columnDefs.ResultCols))
	for i, c := range columnDefs.ResultCols {
		typs[i] = types.New(types.T(c.Typ.Id), c.Typ.Width, c.Typ.Scale, c.Typ.Precision)
		mcol := &MysqlColumn{}
		mcol.SetName(c.GetName())
		err = convertEngineTypeToMysqlType(ctx, typs[i].Oid, mcol)
		if err != nil {
			return err
		}
		mrs.AddColumn(mcol)
	}
	mrs.Data = make([][]interface{}, 1)
	for i := 0; i < 1; i++ {
		mrs.Data[i] = make([]interface{}, len(columnDefs.ResultCols))
	}
	exportParam := &ExportParam{
		ExportParam: eParam,
	}
	//prepare output queue
	oq := NewOutputQueue(ctx, nil, mrs, 1, exportParam, ses.GetShowStmtType())
	oq.reset()
	//prepare export param
	exportParam.DefaultBufSize = ses.GetParameterUnit().SV.ExportDataDefaultFlushSize
	exportParam.UseFileService = true
	exportParam.FileService = ses.GetParameterUnit().FileService
	exportParam.Ctx = ctx
	defer func() {
		exportParam.LineBuffer = nil
		exportParam.OutputStr = nil
		if exportParam.AsyncReader != nil {
			_ = exportParam.AsyncReader.Close()
		}
		if exportParam.AsyncWriter != nil {
			_ = exportParam.AsyncWriter.Close()
		}
	}()
	initExportFileParam(exportParam, mrs)

	//open output file
	if err = openNewFile(ctx, exportParam, mrs); err != nil {
		return err
	}

	//read all files
	for _, file := range files {
		reader, blocks, err = openResultFile(ctx, ses, file.name, file.size)
		if err != nil {
			return err
		}

		quit := false
		//read every block
		for _, block := range blocks {
			select {
			case <-ctx.Done():
				quit = true
			default:
			}

			if quit {
				break
			}
			tmpBatch.Clean(ses.GetMemPool())
			tmpBatch = batch.NewWithSize(len(columnDefs.ResultCols))
			ioVector, err := reader.Read(ctx, block.GetExtent(), indexes, ses.GetMemPool())
			if err != nil {
				return err
			}
			//read every column
			for colIndex, entry := range ioVector.Entries {
				tmpBatch.Vecs[colIndex] = vector.New(typs[colIndex])
				err = tmpBatch.Vecs[colIndex].Read(entry.Object.([]byte))
				if err != nil {
					return err
				}
			}
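			// every row gets a multiplicity of 1 in Zs; rows with Zs <= 0 are
			// skipped when they are extracted below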
			tmpBatch.InitZsOne(tmpBatch.Vecs[0].Length())

			//step2.1: converts it into the csv string
			//step2.2: writes the csv string into the outfile
			n := vector.Length(tmpBatch.Vecs[0])
			for j := 0; j < n; j++ { //row index
				select {
				case <-ctx.Done():
					quit = true
				default:
				}

				if quit {
					break
				}

				if tmpBatch.Zs[j] <= 0 {
					continue
				}
				_, err = extractRowFromEveryVector(ses, tmpBatch, int64(j), oq)
				if err != nil {
					return err
				}
			}
		}
	}

	err = oq.flush()
	if err != nil {
		return err
	}

	err = Close(exportParam)
	if err != nil {
		return err
	}

	return err
}

// openResultMeta checks that the query result for queryId exists and returns
// its result-column definition.
func openResultMeta(ctx context.Context, ses *Session, queryId string) (*plan.ResultColDef, error) {
	metaFs := objectio.NewObjectFS(ses.GetParameterUnit().FileService, catalog.QueryResultMetaDir)
	metaFiles, err := metaFs.ListDir(catalog.QueryResultMetaDir)
	if err != nil {
		return nil, err
	}
	account := ses.GetTenantInfo()
	if account == nil {
		return nil, moerr.NewInternalError(ctx, "modump does not work without the account info")
	}
	metaName := catalog.BuildQueryResultMetaName(account.GetTenant(), queryId)
	fileSize := getFileSize(metaFiles, metaName)
	if fileSize < 0 {
		return nil, moerr.NewInternalError(ctx, "there is no result file for the query %s", queryId)
	}
	// read meta's meta
	metaFile := catalog.BuildQueryResultMetaPath(account.GetTenant(), queryId)
	reader, err := objectio.NewObjectReader(metaFile, ses.GetParameterUnit().FileService)
	if err != nil {
		return nil, err
	}
	bs, err := reader.ReadAllMeta(ctx, fileSize, ses.GetMemPool())
	if err != nil {
		return nil, err
	}
	idxs := make([]uint16, 1)
	idxs[0] = catalog.COLUMNS_IDX
	// read meta's data
	iov, err := reader.Read(ctx, bs[0].GetExtent(), idxs, ses.GetMemPool())
	if err != nil {
		return nil, err
	}
	vec := vector.New(catalog.MetaColTypes[catalog.COLUMNS_IDX])
	defer vector.Clean(vec, ses.GetMemPool())
	if err = vec.Read(iov.Entries[0].Object.([]byte)); err != nil {
		return nil, err
	}
	def := vector.MustStrCols(vec)[0]
	r := &plan.ResultColDef{}
	if err = r.Unmarshal([]byte(def)); err != nil {
		return nil, err
	}
	return r, err
}

// getResultFiles lists all result files of queryId
func getResultFiles(ctx context.Context, ses *Session, queryId string) ([]resultFileInfo, error) {
	fs := objectio.NewObjectFS(ses.GetParameterUnit().FileService, getQueryResultDir())
	files, err := fs.ListDir(getQueryResultDir())
	if err != nil {
		return nil, err
	}
	account := ses.GetTenantInfo()
	if account == nil {
		return nil, moerr.NewInternalError(ctx, "modump does not work without the account info")
	}
	prefix := getPrefixOfQueryResultFile(account.GetTenant(), queryId)
	ret := make([]resultFileInfo, 0, len(files))
	for _, file := range files {
		if file.IsDir {
			continue
		}
		if strings.HasPrefix(file.Name, prefix) {
			if !strings.HasSuffix(file.Name, ".blk") {
				return nil, moerr.NewInternalError(ctx, "the query result file %s has the invalid name", file.Name)
			}
			indexOfLastUnderbar := strings.LastIndexByte(file.Name, '_')
			if indexOfLastUnderbar == -1 {
				return nil, moerr.NewInternalError(ctx, "the query result file %s has the invalid name", file.Name)
			}
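			// block files are named <prefix><blockIndex>.blk, i.e.
			// <account>_<statement-uuid>_<N>.blk; parse the trailing block index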
			blockIndexStart := indexOfLastUnderbar + 1
			blockIndexEnd := len(file.Name) - len(".blk")
			if blockIndexStart >= blockIndexEnd {
				return nil, moerr.NewInternalError(ctx, "the query result file %s has the invalid name", file.Name)
			}
			blockIndexStr := file.Name[blockIndexStart:blockIndexEnd]
			blockIndex, err := strconv.ParseInt(blockIndexStr, 10, 64)
			if err != nil {
				return nil, err
			}
			if blockIndex < 0 {
				return nil, moerr.NewInternalError(ctx, "the query result file %s has the invalid name", file.Name)
			}
			ret = append(ret, resultFileInfo{
				name:       file.Name,
				size:       file.Size,
				blockIndex: blockIndex,
			})
		}
	}
	sort.Slice(ret, func(i, j int) bool {
		return ret[i].blockIndex < ret[j].blockIndex
	})
	return ret, err
}

// openResultFile reads all blocks of the result file
func openResultFile(ctx context.Context, ses *Session, fileName string, fileSize int64) (objectio.Reader, []objectio.BlockObject, error) {
	// read result's blocks
	filePath := getPathOfQueryResultFile(fileName)
	reader, err := objectio.NewObjectReader(filePath, ses.GetParameterUnit().FileService)
	if err != nil {
		return nil, nil, err
	}
	bs, err := reader.ReadAllMeta(ctx, fileSize, ses.GetMemPool())
	if err != nil {
		return nil, nil, err
	}
	return reader, bs, err
}

// getFileSize finds fileName among the dir entries and returns its size,
// or -1 if it does not exist.
func getFileSize(files []fileservice.DirEntry, fileName string) int64 {
	for _, file := range files {
		if file.Name == fileName {
			return file.Size
		}
	}
	return -1
}