github.com/matrixorigin/matrixone@v0.7.0/pkg/frontend/query_result_test.go

// Copyright 2021 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package frontend

import (
	"context"
	"fmt"
	"io"
	"testing"

	"github.com/BurntSushi/toml"
	"github.com/golang/mock/gomock"
	"github.com/google/uuid"
	"github.com/matrixorigin/matrixone/pkg/common/mpool"
	"github.com/matrixorigin/matrixone/pkg/config"
	"github.com/matrixorigin/matrixone/pkg/container/batch"
	"github.com/matrixorigin/matrixone/pkg/container/types"
	"github.com/matrixorigin/matrixone/pkg/container/vector"
	"github.com/matrixorigin/matrixone/pkg/defines"
	"github.com/matrixorigin/matrixone/pkg/fileservice"
	mock_frontend "github.com/matrixorigin/matrixone/pkg/frontend/test"
	"github.com/matrixorigin/matrixone/pkg/pb/plan"
	"github.com/matrixorigin/matrixone/pkg/sql/parsers"
	"github.com/matrixorigin/matrixone/pkg/sql/parsers/dialect"
	"github.com/matrixorigin/matrixone/pkg/sql/parsers/tree"
	"github.com/matrixorigin/matrixone/pkg/util/trace/impl/motrace"
	"github.com/stretchr/testify/assert"
)

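// newLocalETLFS creates a local ETL file service rooted at a per-test temporary directory.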
func newLocalETLFS(t *testing.T, fsName string) fileservice.FileService {
	dir := t.TempDir()
	fs, err := fileservice.NewLocalETLFS(fsName, dir)
	assert.Nil(t, err)
	return fs
}

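// newTestSession builds a Session for the query-result tests: it loads the test
// system variables, turns SaveQueryResult on, allocates a test mpool, attaches a
// local ETL file service, and wires a mocked IOSession into a MySQL client protocol.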
func newTestSession(t *testing.T, ctrl *gomock.Controller) *Session {
	var err error
	var testPool *mpool.MPool
	// parameters
	pu := config.NewParameterUnit(&config.FrontendParameters{}, nil, nil, nil, nil)
	_, err = toml.DecodeFile("test/system_vars_config.toml", pu.SV)
	assert.Nil(t, err)
	pu.SV.SaveQueryResult = "on"
	testPool, err = mpool.NewMPool("testPool", pu.SV.GuestMmuLimitation, mpool.NoFixed)
	assert.Nil(t, err)

	// file service
	pu.FileService = newLocalETLFS(t, defines.SharedFileServiceName)

	// io session: the mocked connection accepts any write and reports an empty remote address
	ioses := mock_frontend.NewMockIOSession(ctrl)
	ioses.EXPECT().Write(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
	ioses.EXPECT().RemoteAddress().Return("").AnyTimes()
	ioses.EXPECT().Ref().AnyTimes()
	proto := NewMysqlClientProtocol(0, ioses, 1024, pu.SV)

	// new session
	ses := NewSession(proto, testPool, pu, GSysVariables, true)
	return ses
}

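// newBatch builds a batch with len(ts) int8 columns and the given number of rows;
// every column holds the values 0, 1, ..., rows-1.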
func newBatch(ts []types.Type, rows int) *batch.Batch {
	bat := batch.NewWithSize(len(ts))
	bat.InitZsOne(rows)
	for i, typ := range ts {
		switch typ.Oid {
		case types.T_int8:
			vec := vector.New(typ)
			vs := make([]int8, rows)
			for j := range vs {
				vs[j] = int8(j)
			}
			vec.Col = vs
			bat.Vecs[i] = vec
		default:
			panic("invalid type")
		}
	}
	return bat
}

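// Test_saveQueryResultMeta saves three result blocks for a statement, writes the
// result metadata, reads both back through openResultMeta and getResultFiles, and
// finally dumps the result to a CSV file and checks its content.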
func Test_saveQueryResultMeta(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	var err error
	var retColDef *plan.ResultColDef
	var files []resultFileInfo
	// prepare session
	ses := newTestSession(t, ctrl)
	_ = ses.SetGlobalVar("save_query_result", int8(1))
	defer ses.Dispose()
	const blockCnt int = 3

	tenant := &TenantInfo{
		Tenant:   sysAccountName,
		TenantID: sysAccountID,
	}
	ses.SetTenantInfo(tenant)

	// three int8 result columns: a_0, a_1, a_2
	typs := []types.Type{
		types.T_int8.ToType(),
		types.T_int8.ToType(),
		types.T_int8.ToType(),
	}

	colDefs := make([]*plan.ColDef, len(typs))
	for i, ty := range typs {
		colDefs[i] = &plan.ColDef{
			Name: fmt.Sprintf("a_%d", i),
			Typ: &plan.Type{
				Id:    int32(ty.Oid),
				Size:  ty.Size,
				Scale: ty.Scale,
				Width: ty.Width,
			},
		}
	}

	ses.rs = &plan.ResultColDef{
		ResultCols: colDefs,
	}

	testUUID := uuid.NullUUID{}.UUID
	ses.tStmt = &motrace.StatementInfo{
		StatementID: testUUID,
	}

	ctx := context.Background()
	asts, err := parsers.Parse(ctx, dialect.MYSQL, "select a,b,c from t")
	assert.Nil(t, err)

	ses.ast = asts[0]
	ses.p = &plan.Plan{}

	yes := openSaveQueryResult(ses)
	assert.True(t, yes)

	ses.requestCtx = context.Background()

	// expected CSV content: blockCnt blocks, each with the rows 0,0,0 / 1,1,1 / 2,2,2
	wantResult := "0,0,0\n1,1,1\n2,2,2\n0,0,0\n1,1,1\n2,2,2\n0,0,0\n1,1,1\n2,2,2\n"
	// save blocks
	for i := 0; i < blockCnt; i++ {
		data := newBatch(typs, blockCnt)
		err = saveQueryResult(ses, data)
		assert.Nil(t, err)
	}

	// save result meta
	err = saveQueryResultMeta(ses)
	assert.Nil(t, err)

	retColDef, err = openResultMeta(ctx, ses, testUUID.String())
	assert.Nil(t, err)
	assert.NotNil(t, retColDef)

	// block files are indexed starting from 1 and must not be empty
	files, err = getResultFiles(ctx, ses, testUUID.String())
	assert.Nil(t, err)
	assert.Equal(t, len(files), blockCnt)
	for i := 0; i < blockCnt; i++ {
		assert.NotEqual(t, files[i].size, int64(0))
		assert.Equal(t, files[i].blockIndex, int64(i+1))
	}

	// dump the saved result to a CSV file
	exportFilePath := fileservice.JoinPath(defines.SharedFileServiceName, "/block3.csv")
	ep := &tree.ExportParam{
		Outfile:  true,
		QueryId:  testUUID.String(),
		FilePath: exportFilePath,
		Fields: &tree.Fields{
			Terminated: ",",
			EnclosedBy: '"',
		},
		Lines: &tree.Lines{
			TerminatedBy: "\n",
		},
		MaxFileSize: 0,
		Header:      false,
		ForceQuote:  nil,
	}
	err = doDumpQueryResult(ctx, ses, ep)
	assert.Nil(t, err)

	fs := ses.GetParameterUnit().FileService

	// read the dumped file back and compare it with the expected CSV content
	var r io.ReadCloser
	err = fs.Read(ctx, &fileservice.IOVector{
		FilePath: exportFilePath,
		Entries: []fileservice.IOEntry{
			{
				Offset:            0,
				Size:              -1,
				ReadCloserForRead: &r,
			},
		},
	})
	assert.Nil(t, err)
	content, err := io.ReadAll(r)
	assert.Nil(t, err)
	assert.Nil(t, r.Close())
	assert.Equal(t, wantResult, string(content))
}

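// Test_getFileSize checks that getFileSize returns the size of a matching entry
// and -1 when no entry with the given name exists.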
func Test_getFileSize(t *testing.T) {
	files := []fileservice.DirEntry{
		{Name: "a", IsDir: false, Size: 1},
	}
	assert.Equal(t, int64(1), getFileSize(files, "a"))
	assert.Equal(t, int64(-1), getFileSize(files, "b"))
}