github.com/matrixorigin/matrixone@v1.2.0/pkg/frontend/query_result.go

     1  // Copyright 2021 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package frontend
    16  
    17  import (
    18  	"context"
    19  	"encoding/json"
    20  	"fmt"
    21  	"strings"
    22  	"time"
    23  
    24  	"github.com/google/uuid"
    25  	"go.uber.org/zap"
    26  
    27  	"github.com/matrixorigin/matrixone/pkg/catalog"
    28  	"github.com/matrixorigin/matrixone/pkg/common/moerr"
    29  	"github.com/matrixorigin/matrixone/pkg/common/mpool"
    30  	"github.com/matrixorigin/matrixone/pkg/container/batch"
    31  	"github.com/matrixorigin/matrixone/pkg/container/types"
    32  	"github.com/matrixorigin/matrixone/pkg/container/vector"
    33  	"github.com/matrixorigin/matrixone/pkg/defines"
    34  	"github.com/matrixorigin/matrixone/pkg/fileservice"
    35  	"github.com/matrixorigin/matrixone/pkg/frontend/constant"
    36  	"github.com/matrixorigin/matrixone/pkg/objectio"
    37  	"github.com/matrixorigin/matrixone/pkg/pb/plan"
    38  	"github.com/matrixorigin/matrixone/pkg/sql/parsers/tree"
    39  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/blockio"
    40  )
    41  
    42  func getQueryResultDir() string {
    43  	return fileservice.JoinPath(defines.SharedFileServiceName, "/query_result")
    44  }
    45  
    46  func getPathOfQueryResultFile(fileName string) string {
    47  	return fmt.Sprintf("%s/%s", getQueryResultDir(), fileName)
    48  }
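
        // Editor's sketch, not part of the original file: a minimal example of how the
        // two path helpers above compose. The file name "result_1.blk" is made up; the
        // exact rendered prefix depends on defines.SharedFileServiceName and on how
        // fileservice.JoinPath joins the service name with the path.
        func exampleQueryResultFilePath() string {
        	// roughly "<shared-fs-name>/query_result/result_1.blk"
        	return getPathOfQueryResultFile("result_1.blk")
        }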
    49  
    50  func openSaveQueryResult(ctx context.Context, ses *Session) bool {
    51  	if ses.ast == nil || ses.tStmt == nil {
    52  		return false
    53  	}
    54  	if ses.tStmt.SqlSourceType == constant.InternalSql {
    55  		return false
    56  	}
    57  	if ses.tStmt.StatementType == "Select" && ses.tStmt.SqlSourceType != constant.CloudUserSql {
    58  		return false
    59  	}
    60  	val, err := ses.GetGlobalVar(ctx, "save_query_result")
    61  	if err != nil {
    62  		return false
    63  	}
    64  	if v, _ := val.(int8); v > 0 {
    65  		if ses.blockIdx == 0 {
    66  			if err = initQueryResultConfig(ctx, ses); err != nil {
    67  				return false
    68  			}
    69  		}
    70  		return true
    71  	}
    72  	return false
    73  }
    74  
    75  func initQueryResultConfig(ctx context.Context, ses *Session) error {
    76  	val, err := ses.GetGlobalVar(ctx, "query_result_maxsize")
    77  	if err != nil {
    78  		return err
    79  	}
    80  	switch v := val.(type) {
    81  	case uint64:
    82  		ses.limitResultSize = float64(v)
    83  	case float64:
    84  		ses.limitResultSize = v
    85  	}
    86  	var p uint64
    87  	val, err = ses.GetGlobalVar(ctx, "query_result_timeout")
    88  	if err != nil {
    89  		return err
    90  	}
    91  	switch v := val.(type) {
    92  	case uint64:
    93  		p = v
    94  	case float64:
    95  		p = uint64(v)
    96  	}
    97  	ses.createdTime = time.Now()
    98  	ses.expiredTime = ses.createdTime.Add(time.Hour * time.Duration(p))
    99  	return err
   100  }
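
        // Editor's note (a sketch, not in the original source): the globals consulted
        // here and in openSaveQueryResult are "save_query_result" (the on/off switch),
        // "query_result_maxsize" (a per-query cap that saveQueryResult below interprets
        // in megabytes) and "query_result_timeout" (retention in hours, used to derive
        // expiredTime). An operator would typically enable the feature with something
        // along the lines of `set global save_query_result = on`; the exact syntax is
        // per the MatrixOne documentation.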
   101  
   102  func saveQueryResult(ctx context.Context, ses *Session, bat *batch.Batch) error {
   103  	s := ses.curResultSize + float64(bat.Size())/(1024*1024)
   104  	if s > ses.limitResultSize {
   105  		logInfo(ses, ses.GetDebugString(), "query result size exceeds the limit, skip saving", zap.Float64("current result size", s))
   106  		return nil
   107  	}
   108  	fs := getGlobalPu().FileService
   109  	// write query result
   110  	path := catalog.BuildQueryResultPath(ses.GetTenantInfo().GetTenant(), uuid.UUID(ses.tStmt.StatementID).String(), ses.GetIncBlockIdx())
   111  	logInfo(ses, ses.GetDebugString(), "open save query result", zap.String("statement id", uuid.UUID(ses.tStmt.StatementID).String()), zap.String("fileservice name", fs.Name()), zap.String("write path", path), zap.Float64("current result size", s))
   112  	writer, err := objectio.NewObjectWriterSpecial(objectio.WriterQueryResult, path, fs)
   113  	if err != nil {
   114  		return err
   115  	}
   116  	_, err = writer.Write(bat)
   117  	if err != nil {
   118  		return err
   119  	}
   120  	option := objectio.WriteOptions{
   121  		Type: objectio.WriteTS,
   122  		Val:  ses.expiredTime,
   123  	}
   124  	_, err = writer.WriteEnd(ctx, option)
   125  	if err != nil {
   126  		return err
   127  	}
   128  	ses.curResultSize = s
   129  	return err
   130  }
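
        // Editor's note (sketch): each call to saveQueryResult writes one object per
        // result batch under catalog.BuildQueryResultPath(tenant, statementID, blockIdx),
        // advancing the block index via ses.GetIncBlockIdx(). saveQueryResultMeta below
        // then records every written block path in a single meta object, so the two
        // functions are expected to run in that order (see maySaveQueryResult).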
   131  
   132  func saveQueryResultMeta(ctx context.Context, ses *Session) error {
   133  	defer func() {
   134  		ses.ResetBlockIdx()
   135  		ses.p = nil
   136  		// Tips: Session.SetTStmt() resets tStmt when the query is DONE.
   137  		// Be careful if you want to do async operations.
   138  		ses.tStmt = nil
   139  		ses.curResultSize = 0
   140  	}()
   141  	fs := getGlobalPu().FileService
   142  	// write query result meta
   143  	colMap, err := buildColumnMap(ctx, ses.rs)
   144  	if err != nil {
   145  		return err
   146  	}
   147  	b, err := ses.rs.Marshal()
   148  	if err != nil {
   149  		return err
   150  	}
   151  	buf := new(strings.Builder)
   152  	prefix := ",\n"
   153  	for i := 1; i <= ses.blockIdx; i++ {
   154  		if i > 1 {
   155  			buf.WriteString(prefix)
   156  		}
   157  		buf.WriteString(catalog.BuildQueryResultPath(ses.GetTenantInfo().GetTenant(), uuid.UUID(ses.tStmt.StatementID).String(), i))
   158  	}
   159  
   160  	var sp []byte
   161  	if ses.p != nil {
   162  		sp, err = ses.p.Marshal()
   163  		if err != nil {
   164  			return err
   165  		}
   166  	}
   167  
   168  	st, err := simpleAstMarshal(ses.ast)
   169  	if err != nil {
   170  		return err
   171  	}
   172  	m := &catalog.Meta{
   173  		QueryId:     ses.tStmt.StatementID,
   174  		Statement:   ses.tStmt.Statement,
   175  		AccountId:   ses.GetTenantInfo().GetTenantID(),
   176  		RoleId:      ses.tStmt.RoleId,
   177  		ResultPath:  buf.String(),
   178  		CreateTime:  types.UnixToTimestamp(ses.createdTime.Unix()),
   179  		ResultSize:  ses.curResultSize,
   180  		Columns:     string(b),
   181  		Tables:      getTablesFromPlan(ses.p),
   182  		UserId:      ses.GetTenantInfo().GetUserID(),
   183  		ExpiredTime: types.UnixToTimestamp(ses.expiredTime.Unix()),
   184  		Plan:        string(sp),
   185  		Ast:         string(st),
   186  		ColumnMap:   colMap,
   187  	}
   188  	metaBat, err := buildQueryResultMetaBatch(m, ses.pool)
   189  	if err != nil {
   190  		return err
   191  	}
   192  	metaPath := catalog.BuildQueryResultMetaPath(ses.GetTenantInfo().GetTenant(), uuid.UUID(ses.tStmt.StatementID).String())
   193  	metaWriter, err := objectio.NewObjectWriterSpecial(objectio.WriterQueryResult, metaPath, fs)
   194  	if err != nil {
   195  		return err
   196  	}
   197  	_, err = metaWriter.Write(metaBat)
   198  	if err != nil {
   199  		return err
   200  	}
   201  	option := objectio.WriteOptions{
   202  		Type: objectio.WriteTS,
   203  		Val:  ses.expiredTime,
   204  	}
   205  	_, err = metaWriter.WriteEnd(ctx, option)
   206  	if err != nil {
   207  		return err
   208  	}
   209  	return err
   210  }
   211  
   212  func buildColumnMap(ctx context.Context, rs *plan.ResultColDef) (string, error) {
   213  	if rs == nil {
   214  		return "", moerr.NewInternalError(ctx, "resultColDef is nil")
   215  	}
   216  	m := make(map[string][]int)
   217  	org := make([]string, len(rs.ResultCols))
   218  	for i, col := range rs.ResultCols {
   219  		org[i] = col.Name
   220  		v := m[col.Name]
   221  		m[col.Name] = append(v, i)
   222  	}
   223  	for _, v := range m {
   224  		if len(v) > 1 {
   225  			for i := range v {
   226  				rs.ResultCols[v[i]].Name = fmt.Sprintf("%s_%d", rs.ResultCols[v[i]].Name, i)
   227  			}
   228  		}
   229  	}
   230  	buf := new(strings.Builder)
   231  	for i := range org {
   232  		if i > 0 {
   233  			buf.WriteString(", ")
   234  		}
   235  		if len(rs.ResultCols[i].Typ.Table) > 0 {
   236  			buf.WriteString(fmt.Sprintf("%s.%s -> %s", rs.ResultCols[i].Typ.Table, org[i], rs.ResultCols[i].Name))
   237  		} else {
   238  			buf.WriteString(fmt.Sprintf("%s -> %s", org[i], rs.ResultCols[i].Name))
   239  		}
   240  	}
   241  	return buf.String(), nil
   242  }
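
        // Editor's sketch of the mapping string built above, with made-up column names:
        // for result columns t1.a, t2.a and b, the duplicated name "a" is renamed to
        // a_0 / a_1 and the returned string looks like
        //
        //	"t1.a -> a_0, t2.a -> a_1, b -> b"
        //
        // (the table prefix only appears when the column's Typ.Table is non-empty).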
   243  
   244  func isResultQuery(p *plan.Plan) []string {
   245  	var uuids []string = nil
   246  	if q, ok := p.Plan.(*plan.Plan_Query); ok {
   247  		for _, n := range q.Query.Nodes {
   248  			if n.NodeType == plan.Node_EXTERNAL_SCAN {
   249  				if n.TableDef.TableType == "query_result" {
   250  					uuids = append(uuids, n.TableDef.Name)
   251  				}
   252  			} else if n.NodeType == plan.Node_FUNCTION_SCAN {
   253  				if n.TableDef.TblFunc.Name == "meta_scan" {
   254  					uuids = append(uuids, n.TableDef.Name)
   255  				}
   256  			}
   257  		}
   258  	}
   259  	return uuids
   260  }
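
        // Editor's note (sketch): the two node shapes matched above correspond to
        // reading saved results back, e.g. a plan that scans a previous result through
        // the "meta_scan" table function, or an external scan whose TableDef.TableType
        // is "query_result". For such plans the referenced query-result ids are
        // returned; for any other plan the function returns nil.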
   261  
   262  func checkPrivilege(uuids []string, reqCtx context.Context, ses *Session) error {
   263  	f := getGlobalPu().FileService
   264  	for _, id := range uuids {
   265  		// var size int64 = -1
   266  		path := catalog.BuildQueryResultMetaPath(ses.GetTenantInfo().GetTenant(), id)
   267  		reader, err := blockio.NewFileReader(f, path)
   268  		if err != nil {
   269  			return err
   270  		}
   271  		idxs := []uint16{catalog.PLAN_IDX, catalog.AST_IDX}
   272  		bats, closeCB, err := reader.LoadAllColumns(reqCtx, idxs, ses.GetMemPool())
   273  		if err != nil {
   274  			return err
   275  		}
   276  		defer func() {
   277  			if closeCB != nil {
   278  				closeCB()
   279  			}
   280  		}()
   281  		bat := bats[0]
   282  		p := bat.Vecs[0].GetStringAt(0)
   283  		pn := &plan.Plan{}
   284  		if err = pn.Unmarshal([]byte(p)); err != nil {
   285  			return err
   286  		}
   287  		a := bat.Vecs[1].GetStringAt(0)
   288  		var ast tree.Statement
   289  		if ast, err = simpleAstUnmarshal([]byte(a)); err != nil {
   290  			return err
   291  		}
   292  		if err = authenticateCanExecuteStatementAndPlan(reqCtx, ses, ast, pn); err != nil {
   293  			return err
   294  		}
   295  	}
   296  	return nil
   297  }
   298  
   299  func maySaveQueryResult(ctx context.Context, ses *Session, bat *batch.Batch) error {
   300  	if openSaveQueryResult(ctx, ses) {
   301  		if err := saveQueryResult(ctx, ses, bat); err != nil {
   302  			return err
   303  		}
   304  		if err := saveQueryResultMeta(ctx, ses); err != nil {
   305  			return err
   306  		}
   307  	}
   308  	return nil
   309  }
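
        // Editor's sketch of the intended call pattern (not part of the original file);
        // the surrounding caller and the batch are hypothetical:
        //
        //	// after producing one result batch `bat` for the current statement:
        //	if err := maySaveQueryResult(ctx, ses, bat); err != nil {
        //		return err
        //	}
        //
        // openSaveQueryResult gates everything on the save_query_result switch, so the
        // call is cheap when result saving is disabled.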
   310  
   311  type simpleAst struct {
   312  	Typ int `json:"age"`
   313  	// records which kind of statement this is, as needed by determinePrivilegeSetOfStatement
   314  }
   315  
   316  type astType int
   317  
   318  const (
   319  	astShowNone astType = iota
   320  	astSelect
   321  	astShowAboutTable
   322  	astExplain
   323  	astValues
   324  	astExecute
   325  )
   326  
   327  func simpleAstMarshal(stmt tree.Statement) ([]byte, error) {
   328  	s := simpleAst{}
   329  	switch stmt.(type) {
   330  	case *tree.Select:
   331  		s.Typ = int(astSelect)
   332  	case *tree.ShowTables, *tree.ShowSequences, *tree.ShowCreateTable, *tree.ShowColumns, *tree.ShowCreateView, *tree.ShowCreateDatabase:
   333  		s.Typ = int(astShowAboutTable)
   334  	case *tree.ShowProcessList, *tree.ShowErrors, *tree.ShowWarnings, *tree.ShowVariables,
   335  		*tree.ShowStatus, *tree.ShowTarget, *tree.ShowTableStatus,
   336  		*tree.ShowGrants, *tree.ShowIndex,
   337  		*tree.ShowTableNumber, *tree.ShowColumnNumber,
   338  		*tree.ShowTableValues, *tree.ShowNodeList,
   339  		*tree.ShowLocks, *tree.ShowFunctionOrProcedureStatus, *tree.ShowConnectors:
   340  		s.Typ = int(astShowNone)
   341  	case *tree.ExplainFor, *tree.ExplainAnalyze, *tree.ExplainStmt:
   342  		s.Typ = int(astExplain)
   343  	case *tree.Execute:
   344  		s.Typ = int(astExecute)
   345  	case *tree.ValuesStatement:
   346  		s.Typ = int(astValues)
   347  	default:
   348  		s.Typ = int(astShowNone)
   349  	}
   350  	return json.Marshal(s)
   351  }
   352  
   353  func simpleAstUnmarshal(b []byte) (tree.Statement, error) {
   354  	s := &simpleAst{}
   355  	if err := json.Unmarshal(b, s); err != nil {
   356  		return nil, err
   357  	}
   358  	var stmt tree.Statement
   359  	switch astType(s.Typ) {
   360  	case astSelect:
   361  		stmt = &tree.Select{}
   362  	case astShowAboutTable:
   363  		stmt = &tree.ShowTables{}
   364  	case astShowNone:
   365  		stmt = &tree.ShowStatus{}
   366  	case astExplain:
   367  		stmt = &tree.ExplainFor{}
   368  	case astExecute:
   369  		stmt = &tree.Execute{}
   370  	case astValues:
   371  		stmt = &tree.ValuesStatement{}
   372  	}
   373  	return stmt, nil
   374  }
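
        // exampleSimpleAstRoundTrip is an editor's illustrative sketch, not part of the
        // original file. Marshaling a *tree.Select yields the JSON `{"age":1}` (astSelect
        // is 1 and the struct tag on simpleAst.Typ is literally "age"), and unmarshaling
        // it back produces an empty *tree.Select, which is all checkPrivilege needs for
        // the privilege check.
        func exampleSimpleAstRoundTrip() (tree.Statement, error) {
        	b, err := simpleAstMarshal(&tree.Select{})
        	if err != nil {
        		return nil, err
        	}
        	return simpleAstUnmarshal(b)
        }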
   375  
   376  func getTablesFromPlan(p *plan.Plan) string {
   377  	if p == nil {
   378  		return ""
   379  	}
   380  	buf := new(strings.Builder)
   381  	cnt := 0
   382  	if q, ok := p.Plan.(*plan.Plan_Query); ok {
   383  		for _, n := range q.Query.Nodes {
   384  			if n.NodeType == plan.Node_EXTERNAL_SCAN || n.NodeType == plan.Node_TABLE_SCAN {
   385  				if cnt > 0 {
   386  					buf.WriteString(", ")
   387  				}
   388  				buf.WriteString(n.TableDef.Name)
   389  				cnt++
   390  			}
   391  		}
   392  	}
   393  	return buf.String()
   394  }
   395  
   396  func buildQueryResultMetaBatch(m *catalog.Meta, mp *mpool.MPool) (*batch.Batch, error) {
   397  	var err error
   398  	bat := batch.NewWithSize(len(catalog.MetaColTypes))
   399  	bat.SetAttributes(catalog.MetaColNames)
   400  	for i, t := range catalog.MetaColTypes {
   401  		bat.Vecs[i] = vector.NewVec(t)
   402  	}
   403  	if err = vector.AppendFixed(bat.Vecs[catalog.QUERY_ID_IDX], types.Uuid(m.QueryId), false, mp); err != nil {
   404  		return nil, err
   405  	}
   406  	if err = vector.AppendBytes(bat.Vecs[catalog.STATEMENT_IDX], []byte(m.Statement), false, mp); err != nil {
   407  		return nil, err
   408  	}
   409  	if err = vector.AppendFixed(bat.Vecs[catalog.ACCOUNT_ID_IDX], m.AccountId, false, mp); err != nil {
   410  		return nil, err
   411  	}
   412  	if err = vector.AppendFixed(bat.Vecs[catalog.ROLE_ID_IDX], m.RoleId, false, mp); err != nil {
   413  		return nil, err
   414  	}
   415  	if err = vector.AppendBytes(bat.Vecs[catalog.RESULT_PATH_IDX], []byte(m.ResultPath), false, mp); err != nil {
   416  		return nil, err
   417  	}
   418  	if err = vector.AppendFixed(bat.Vecs[catalog.CREATE_TIME_IDX], m.CreateTime, false, mp); err != nil {
   419  		return nil, err
   420  	}
   421  	if err = vector.AppendFixed(bat.Vecs[catalog.RESULT_SIZE_IDX], m.ResultSize, false, mp); err != nil {
   422  		return nil, err
   423  	}
   424  	if err = vector.AppendBytes(bat.Vecs[catalog.COLUMNS_IDX], []byte(m.Columns), false, mp); err != nil {
   425  		return nil, err
   426  	}
   427  	if err = vector.AppendBytes(bat.Vecs[catalog.TABLES_IDX], []byte(m.Tables), false, mp); err != nil {
   428  		return nil, err
   429  	}
   430  	if err = vector.AppendFixed(bat.Vecs[catalog.USER_ID_IDX], m.UserId, false, mp); err != nil {
   431  		return nil, err
   432  	}
   433  	if err = vector.AppendFixed(bat.Vecs[catalog.EXPIRED_TIME_IDX], m.ExpiredTime, false, mp); err != nil {
   434  		return nil, err
   435  	}
   436  	if err = vector.AppendBytes(bat.Vecs[catalog.PLAN_IDX], []byte(m.Plan), false, mp); err != nil {
   437  		return nil, err
   438  	}
   439  	if err = vector.AppendBytes(bat.Vecs[catalog.AST_IDX], []byte(m.Ast), false, mp); err != nil {
   440  		return nil, err
   441  	}
   442  	if err = vector.AppendBytes(bat.Vecs[catalog.COLUMN_MAP_IDX], []byte(m.ColumnMap), false, mp); err != nil {
   443  		return nil, err
   444  	}
   445  	return bat, nil
   446  }
   447  
   448  // resultFileInfo holds the info of the result file
   449  type resultFileInfo struct {
   450  	// the name of the result file
   451  	name string
   452  	// the size of the result file
   453  	size int64
   454  	// the block id of the result file
   455  	blockIndex int64
   456  }
   457  
   458  // doDumpQueryResult reads data from the query result, converts it into CSV, and saves it into
   459  // the file designated by the path.
   460  func doDumpQueryResult(ctx context.Context, ses *Session, eParam *tree.ExportParam) error {
   461  	var err error
   462  	var columnDefs *plan.ResultColDef
   463  	var reader *blockio.BlockReader
   464  	var blocks []objectio.BlockObject
   465  	var files []resultFileInfo
   466  
   467  	//step1: open the result meta and list the result files
   468  	if columnDefs, err = openResultMeta(ctx, ses, eParam.QueryId); err != nil {
   469  		return err
   470  	}
   471  
   472  	if files, err = getResultFiles(ctx, ses, eParam.QueryId); err != nil {
   473  		return err
   474  	}
   475  
   476  	//step2: read every batch from the query result
   477  	columnCount := len(columnDefs.ResultCols)
   478  	indexes := make([]uint16, columnCount)
   479  	for i := range indexes {
   480  		indexes[i] = uint16(i)
   481  	}
   482  	//=====================
   483  	// preparation
   484  	//=====================
   485  	//prepare batch
   486  
   487  	tmpBatch := batch.NewWithSize(columnCount)
   488  	defer tmpBatch.Clean(ses.GetMemPool())
   489  	//prepare result set
   490  	mrs := &MysqlResultSet{}
   491  	typs := make([]types.Type, columnCount)
   492  	for i, c := range columnDefs.ResultCols {
   493  		typs[i] = types.New(types.T(c.Typ.Id), c.Typ.Width, c.Typ.Scale)
   494  		mcol := &MysqlColumn{}
   495  		mcol.SetName(c.GetName())
   496  		err = convertEngineTypeToMysqlType(ctx, typs[i].Oid, mcol)
   497  		if err != nil {
   498  			return err
   499  		}
   500  		mrs.AddColumn(mcol)
   501  	}
   502  	mrs.Data = make([][]interface{}, 1)
   503  	for i := 0; i < 1; i++ {
   504  		mrs.Data[i] = make([]interface{}, columnCount)
   505  	}
   506  	exportParam := &ExportConfig{
   507  		userConfig: eParam,
   508  	}
   509  	//prepare output queue
   510  	oq := NewOutputQueue(ctx, ses, columnCount, mrs, exportParam)
   511  	oq.reset()
   512  	oq.ep.OutTofile = true
   513  	//prepare export param
   514  	exportParam.DefaultBufSize = getGlobalPu().SV.ExportDataDefaultFlushSize
   515  	exportParam.UseFileService = true
   516  	exportParam.FileService = getGlobalPu().FileService
   517  	exportParam.Ctx = ctx
   518  	defer func() {
   519  		exportParam.LineBuffer = nil
   520  		exportParam.OutputStr = nil
   521  		if exportParam.AsyncReader != nil {
   522  			_ = exportParam.AsyncReader.Close()
   523  		}
   524  		if exportParam.AsyncWriter != nil {
   525  			_ = exportParam.AsyncWriter.Close()
   526  		}
   527  	}()
   528  	initExportFileParam(exportParam, mrs)
   529  
   530  	//open output file
   531  	if err = openNewFile(ctx, exportParam, mrs); err != nil {
   532  		return err
   533  	}
   534  
   535  	//read all files
   536  	for _, file := range files {
   537  		reader, blocks, err = openResultFile(ctx, ses, file.name, file.size)
   538  		if err != nil {
   539  			return err
   540  		}
   541  
   542  		quit := false
   543  		//read every block
   544  		for _, block := range blocks {
   545  			select {
   546  			case <-ctx.Done():
   547  				quit = true
   548  			default:
   549  			}
   550  
   551  			if quit {
   552  				break
   553  			}
   554  			tmpBatch.Clean(ses.GetMemPool())
   555  			bat, release, err := reader.LoadColumns(ctx, indexes, nil, block.BlockHeader().BlockID().Sequence(), ses.GetMemPool())
   556  			if err != nil {
   557  				return err
   558  			}
   559  			defer release()
   560  			tmpBatch = bat
   561  
   562  			//step2.1: converts it into the csv string
   563  			//step2.2: writes the csv string into the outfile
   564  			n := tmpBatch.RowCount()
   565  			for j := 0; j < n; j++ { //row index
   566  				select {
   567  				case <-ctx.Done():
   568  					quit = true
   569  				default:
   570  				}
   571  
   572  				if quit {
   573  					break
   574  				}
   575  
   576  				_, err = extractRowFromEveryVector(ctx, ses, tmpBatch, j, oq, true)
   577  				if err != nil {
   578  					return err
   579  				}
   580  			}
   581  		}
   582  	}
   583  
   584  	err = oq.flush()
   585  	if err != nil {
   586  		return err
   587  	}
   588  
   589  	err = Close(exportParam)
   590  	if err != nil {
   591  		return err
   592  	}
   593  
   594  	return err
   595  }
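
        // Editor's note (sketch): doDumpQueryResult is the backend for dumping a saved
        // result to a file. The caller passes a tree.ExportParam whose QueryId names the
        // saved result, plus the usual outfile options; the rows are re-read from the
        // result blocks and pushed through the export/output-queue machinery instead of
        // being sent back to the client.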
   596  
   597  // openResultMeta checks whether the query result of the queryId exists and returns its result column definitions
   598  func openResultMeta(ctx context.Context, ses *Session, queryId string) (*plan.ResultColDef, error) {
   599  	account := ses.GetTenantInfo()
   600  	if account == nil {
   601  		return nil, moerr.NewInternalError(ctx, "modump does not work without the account info")
   602  	}
   603  	metaFile := catalog.BuildQueryResultMetaPath(account.GetTenant(), queryId)
   604  	// open the meta file for reading
   605  	reader, err := blockio.NewFileReader(getGlobalPu().FileService, metaFile)
   606  	if err != nil {
   607  		return nil, err
   608  	}
   609  	idxs := make([]uint16, 1)
   610  	idxs[0] = catalog.COLUMNS_IDX
   611  	// read meta's data
   612  	bats, closeCB, err := reader.LoadAllColumns(ctx, idxs, ses.GetMemPool())
   613  	if err != nil {
   614  		if moerr.IsMoErrCode(err, moerr.ErrFileNotFound) {
   615  			return nil, moerr.NewResultFileNotFound(ctx, makeResultMetaPath(account.GetTenant(), queryId))
   616  		}
   617  		return nil, err
   618  	}
   619  	defer func() {
   620  		if closeCB != nil {
   621  			closeCB()
   622  		}
   623  	}()
   624  	vec := bats[0].Vecs[0]
   625  	def := vec.GetStringAt(0)
   626  	r := &plan.ResultColDef{}
   627  	if err = r.Unmarshal([]byte(def)); err != nil {
   628  		return nil, err
   629  	}
   630  	return r, err
   631  }
   632  
   633  // getResultFiles lists all result files of queryId
   634  func getResultFiles(ctx context.Context, ses *Session, queryId string) ([]resultFileInfo, error) {
   635  	_, str, err := ses.GetTxnCompileCtx().GetQueryResultMeta(queryId)
   636  	if err != nil {
   637  		return nil, err
   638  	}
   639  	fileList := strings.Split(str, ",")
   640  	for i := range fileList {
   641  		fileList[i] = strings.TrimSpace(fileList[i])
   642  	}
   643  	rti := make([]resultFileInfo, 0, len(fileList))
   644  	for i, file := range fileList {
   645  		e, err := getGlobalPu().FileService.StatFile(ctx, file)
   646  		if err != nil {
   647  			if moerr.IsMoErrCode(err, moerr.ErrFileNotFound) {
   648  				return nil, moerr.NewResultFileNotFound(ctx, file)
   649  			}
   650  			return nil, err
   651  		}
   652  		rti = append(rti, resultFileInfo{
   653  			name:       e.Name,
   654  			size:       e.Size,
   655  			blockIndex: int64(i + 1),
   656  		})
   657  	}
   658  
   659  	return rti, nil
   660  }
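
        // Editor's note (sketch): the comma-separated string parsed above is the
        // ResultPath column written by saveQueryResultMeta (one
        // catalog.BuildQueryResultPath entry per block, joined with ",\n"), which is why
        // each entry is trimmed before it is stat'ed.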
   661  
   662  // openResultFile reads all blocks of the result file
   663  func openResultFile(ctx context.Context, ses *Session, fileName string, fileSize int64) (*blockio.BlockReader, []objectio.BlockObject, error) {
   664  	// read result's blocks
   665  	filePath := getPathOfQueryResultFile(fileName)
   666  	reader, err := blockio.NewFileReader(getGlobalPu().FileService, filePath)
   667  	if err != nil {
   668  		return nil, nil, err
   669  	}
   670  	bs, err := reader.LoadAllBlocks(ctx, ses.GetMemPool())
   671  	if err != nil {
   672  		return nil, nil, err
   673  	}
   674  	return reader, bs, err
   675  }
   676  
   677  // getFileSize finds the fileName among the file entries and returns its size,
   678  // or -1 if it does not exist
   679  func getFileSize(files []fileservice.DirEntry, fileName string) int64 {
   680  	for _, file := range files {
   681  		if file.Name == fileName {
   682  			return file.Size
   683  		}
   684  	}
   685  	return -1
   686  }