github.com/matrixorigin/matrixone@v0.7.0/pkg/sql/compile/compile.go (about)

     1  // Copyright 2021 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package compile
    16  
    17  import (
    18  	"context"
    19  	"encoding/json"
    20  	"fmt"
    21  	"runtime"
    22  	"strings"
    23  	"sync/atomic"
    24  
    25  	"github.com/matrixorigin/matrixone/pkg/catalog"
    26  	"github.com/matrixorigin/matrixone/pkg/common/moerr"
    27  	"github.com/matrixorigin/matrixone/pkg/common/mpool"
    28  	"github.com/matrixorigin/matrixone/pkg/container/batch"
    29  	"github.com/matrixorigin/matrixone/pkg/container/types"
    30  	"github.com/matrixorigin/matrixone/pkg/container/vector"
    31  	"github.com/matrixorigin/matrixone/pkg/defines"
    32  	"github.com/matrixorigin/matrixone/pkg/pb/plan"
    33  	"github.com/matrixorigin/matrixone/pkg/pb/timestamp"
    34  	"github.com/matrixorigin/matrixone/pkg/sql/colexec"
    35  	"github.com/matrixorigin/matrixone/pkg/sql/colexec/connector"
    36  	"github.com/matrixorigin/matrixone/pkg/sql/colexec/external"
    37  	"github.com/matrixorigin/matrixone/pkg/sql/colexec/insert"
    38  	"github.com/matrixorigin/matrixone/pkg/sql/colexec/merge"
    39  	"github.com/matrixorigin/matrixone/pkg/sql/colexec/mergeblock"
    40  	"github.com/matrixorigin/matrixone/pkg/sql/colexec/output"
    41  	"github.com/matrixorigin/matrixone/pkg/sql/parsers/tree"
    42  	plan2 "github.com/matrixorigin/matrixone/pkg/sql/plan"
    43  	"github.com/matrixorigin/matrixone/pkg/sql/util"
    44  	"github.com/matrixorigin/matrixone/pkg/vm"
    45  	"github.com/matrixorigin/matrixone/pkg/vm/engine"
    46  	"github.com/matrixorigin/matrixone/pkg/vm/process"
    47  )
    48  
// Note: Now the cost going from stat is actually the number of rows, so we can only estimate a number for the size of each row.
// The current insertion of around 200,000 rows triggers cn to write s3 directly
const (
	// DistributedThreshold is the estimated insert payload size (in bytes)
	// above which the insert is compiled as a distributed plan that writes
	// blocks to S3 directly instead of going through a single pipeline.
	DistributedThreshold   uint64 = 10 * mpool.MB
	// SingleLineSizeEstimate is the assumed byte size of one row; it converts
	// the planner's row-count cost into a rough payload-size estimate.
	SingleLineSizeEstimate uint64 = 300 * mpool.B
)
    55  
    56  // New is used to new an object of compile
    57  func New(addr, db string, sql string, uid string, ctx context.Context,
    58  	e engine.Engine, proc *process.Process, stmt tree.Statement) *Compile {
    59  	return &Compile{
    60  		e:    e,
    61  		db:   db,
    62  		ctx:  ctx,
    63  		uid:  uid,
    64  		sql:  sql,
    65  		proc: proc,
    66  		stmt: stmt,
    67  		addr: addr,
    68  	}
    69  }
    70  
    71  // helper function to judge if init temporary engine is needed
    72  func (c *Compile) NeedInitTempEngine(InitTempEngine bool) bool {
    73  	if InitTempEngine {
    74  		return false
    75  	}
    76  	ddl := c.scope.Plan.GetDdl()
    77  	if ddl != nil {
    78  		qry := ddl.GetCreateTable()
    79  		if qry != nil && qry.Temporary {
    80  			e := c.e.(*engine.EntireEngine).TempEngine
    81  			if e == nil {
    82  				return true
    83  			}
    84  		}
    85  	}
    86  	return false
    87  }
    88  
    89  func (c *Compile) SetTempEngine(ctx context.Context, te engine.Engine) {
    90  	e := c.e.(*engine.EntireEngine)
    91  	e.TempEngine = te
    92  	c.ctx = ctx
    93  }
    94  
    95  // Compile is the entrance of the compute-layer, it compiles AST tree to scope list.
    96  // A scope is an execution unit.
    97  func (c *Compile) Compile(ctx context.Context, pn *plan.Plan, u any, fill func(any, *batch.Batch) error) (err error) {
    98  	defer func() {
    99  		if e := recover(); e != nil {
   100  			err = moerr.ConvertPanicError(ctx, e)
   101  		}
   102  	}()
   103  	c.u = u
   104  	c.fill = fill
   105  	c.info = plan2.GetExecTypeFromPlan(pn)
   106  	// build scope for a single sql
   107  	s, err := c.compileScope(ctx, pn)
   108  	if err != nil {
   109  		return err
   110  	}
   111  	c.scope = s
   112  	c.scope.Plan = pn
   113  	return nil
   114  }
   115  
// setAffectedRows records the number of rows touched by the executed
// statement so GetAffectedRows can report it to the client.
func (c *Compile) setAffectedRows(n uint64) {
	c.affectRows = n
}
   119  
// GetAffectedRows returns the row count recorded by the last Run; it is zero
// for statements that never call setAffectedRows.
func (c *Compile) GetAffectedRows() uint64 {
	return c.affectRows
}
   123  
// Run is an important function of the compute-layer, it executes a single sql according to its scope
// It dispatches on the scope's Magic number: query scopes are executed through
// the corresponding Scope run method (with analyze info collected via a
// deferred fillAnalyzeInfo), DDL scopes delegate to the matching Scope DDL
// method, and DML scopes additionally record the affected-row count.
// The uint64 parameter is unused here (kept for the caller's signature).
func (c *Compile) Run(_ uint64) (err error) {
	if c.scope == nil {
		// nothing was compiled for this statement; nothing to run
		return nil
	}
	defer func() { c.scope = nil }() // free pipeline

	// XXX PrintScope has a none-trivial amount of logging
	// PrintScope(nil, []*Scope{c.scope})
	switch c.scope.Magic {
	case Normal:
		defer c.fillAnalyzeInfo()
		return c.scope.Run(c)
	case Merge:
		defer c.fillAnalyzeInfo()
		return c.scope.MergeRun(c)
	case MergeInsert:
		defer c.fillAnalyzeInfo()
		err := c.scope.MergeRun(c)
		if err != nil {
			return err
		}
		// The distributed insert's MergeBlock operator is always the last
		// instruction (see compileApQuery); it accumulated the row count.
		c.setAffectedRows(c.scope.Instructions[len(c.scope.Instructions)-1].Arg.(*mergeblock.Argument).AffectedRows)
		return nil
	case Remote:
		defer c.fillAnalyzeInfo()
		return c.scope.RemoteRun(c)
	case CreateDatabase:
		err := c.scope.CreateDatabase(c)
		if err != nil {
			return err
		}
		// database create/drop conventionally reports one affected row
		c.setAffectedRows(1)
		return nil
	case DropDatabase:
		err := c.scope.DropDatabase(c)
		if err != nil {
			return err
		}
		c.setAffectedRows(1)
		return nil
	case CreateTable:
		// temporary tables are created through the temp engine path
		qry := c.scope.Plan.GetDdl().GetCreateTable()
		if qry.Temporary {
			return c.scope.CreateTempTable(c)
		} else {
			return c.scope.CreateTable(c)
		}
	case AlterView:
		return c.scope.AlterView(c)
	case DropTable:
		return c.scope.DropTable(c)
	case CreateIndex:
		return c.scope.CreateIndex(c)
	case DropIndex:
		return c.scope.DropIndex(c)
	case TruncateTable:
		return c.scope.TruncateTable(c)
	case Deletion:
		defer c.fillAnalyzeInfo()
		affectedRows, err := c.scope.Delete(c)
		if err != nil {
			return err
		}
		c.setAffectedRows(affectedRows)
		return nil
	case Insert:
		defer c.fillAnalyzeInfo()
		affectedRows, err := c.scope.Insert(c)
		if err != nil {
			return err
		}
		c.setAffectedRows(affectedRows)
		return nil
	case Update:
		defer c.fillAnalyzeInfo()
		affectedRows, err := c.scope.Update(c)
		if err != nil {
			return err
		}
		c.setAffectedRows(affectedRows)
		return nil
	}
	// unknown magic numbers are silently treated as no-ops
	return nil
}
   209  
   210  func (c *Compile) compileScope(ctx context.Context, pn *plan.Plan) (*Scope, error) {
   211  	switch qry := pn.Plan.(type) {
   212  	case *plan.Plan_Query:
   213  		return c.compileQuery(ctx, qry.Query)
   214  	case *plan.Plan_Ddl:
   215  		switch qry.Ddl.DdlType {
   216  		case plan.DataDefinition_CREATE_DATABASE:
   217  			return &Scope{
   218  				Magic: CreateDatabase,
   219  				Plan:  pn,
   220  			}, nil
   221  		case plan.DataDefinition_DROP_DATABASE:
   222  			return &Scope{
   223  				Magic: DropDatabase,
   224  				Plan:  pn,
   225  			}, nil
   226  		case plan.DataDefinition_CREATE_TABLE:
   227  			return &Scope{
   228  				Magic: CreateTable,
   229  				Plan:  pn,
   230  			}, nil
   231  		case plan.DataDefinition_ALTER_VIEW:
   232  			return &Scope{
   233  				Magic: AlterView,
   234  				Plan:  pn,
   235  			}, nil
   236  		case plan.DataDefinition_DROP_TABLE:
   237  			return &Scope{
   238  				Magic: DropTable,
   239  				Plan:  pn,
   240  			}, nil
   241  		case plan.DataDefinition_TRUNCATE_TABLE:
   242  			return &Scope{
   243  				Magic: TruncateTable,
   244  				Plan:  pn,
   245  			}, nil
   246  		case plan.DataDefinition_CREATE_INDEX:
   247  			return &Scope{
   248  				Magic: CreateIndex,
   249  				Plan:  pn,
   250  			}, nil
   251  		case plan.DataDefinition_DROP_INDEX:
   252  			return &Scope{
   253  				Magic: DropIndex,
   254  				Plan:  pn,
   255  			}, nil
   256  		case plan.DataDefinition_SHOW_DATABASES,
   257  			plan.DataDefinition_SHOW_TABLES,
   258  			plan.DataDefinition_SHOW_COLUMNS,
   259  			plan.DataDefinition_SHOW_CREATETABLE:
   260  			return c.compileQuery(ctx, pn.GetDdl().GetQuery())
   261  			// 1、not supported: show arnings/errors/status/processlist
   262  			// 2、show variables will not return query
   263  			// 3、show create database/table need rewrite to create sql
   264  		}
   265  	}
   266  	return nil, moerr.NewNYI(ctx, fmt.Sprintf("query '%s'", pn))
   267  }
   268  
   269  func (c *Compile) cnListStrategy() {
   270  	if len(c.cnList) == 0 {
   271  		c.cnList = append(c.cnList, engine.Node{Mcpu: c.NumCPU()})
   272  	} else if len(c.cnList) > c.info.CnNumbers {
   273  		c.cnList = c.cnList[:c.info.CnNumbers]
   274  	}
   275  }
   276  
   277  func (c *Compile) compileQuery(ctx context.Context, qry *plan.Query) (*Scope, error) {
   278  	if len(qry.Steps) != 1 {
   279  		return nil, moerr.NewNYI(ctx, fmt.Sprintf("query '%s'", qry))
   280  	}
   281  	var err error
   282  	c.cnList, err = c.e.Nodes()
   283  	if err != nil {
   284  		return nil, err
   285  	}
   286  	blkNum := 0
   287  	for _, n := range qry.Nodes {
   288  		if n.NodeType == plan.Node_TABLE_SCAN {
   289  			if n.Stats != nil {
   290  				blkNum += int(n.Stats.BlockNum)
   291  			}
   292  		}
   293  	}
   294  	switch qry.StmtType {
   295  	case plan.Query_INSERT:
   296  		insertNode := qry.Nodes[qry.Steps[0]]
   297  		nodeStats := qry.Nodes[insertNode.Children[0]].Stats
   298  		if nodeStats.GetCost()*float64(SingleLineSizeEstimate) > float64(DistributedThreshold) || qry.LoadTag || blkNum >= MinBlockNum {
   299  			c.cnListStrategy()
   300  		} else {
   301  			c.cnList = engine.Nodes{engine.Node{Mcpu: c.generateCPUNumber(c.NumCPU(), blkNum)}}
   302  		}
   303  	default:
   304  		if blkNum < MinBlockNum {
   305  			c.cnList = engine.Nodes{engine.Node{Mcpu: c.generateCPUNumber(c.NumCPU(), blkNum)}}
   306  		} else {
   307  			c.cnListStrategy()
   308  		}
   309  	}
   310  
   311  	c.initAnalyze(qry)
   312  	ss, err := c.compilePlanScope(ctx, qry.Nodes[qry.Steps[0]], qry.Nodes)
   313  	if err != nil {
   314  		return nil, err
   315  	}
   316  	return c.compileApQuery(qry, ss)
   317  }
   318  
// compileApQuery finishes a compiled scope list by appending the statement's
// terminal operator: Deletion / Insert (or distributed MergeInsert) / Update
// for DML, and an Output operator that feeds rows back through c.fill for
// plain queries. The returned scope is the one Run executes.
func (c *Compile) compileApQuery(qry *plan.Query, ss []*Scope) (*Scope, error) {
	var rs *Scope
	switch qry.StmtType {
	case plan.Query_DELETE:
		rs = c.newMergeScope(ss)
		updateScopesLastFlag([]*Scope{rs})
		rs.Magic = Deletion
		c.SetAnalyzeCurrent([]*Scope{rs}, c.anal.curr)
		scp, err := constructDeletion(qry.Nodes[qry.Steps[0]], c.e, c.proc)
		if err != nil {
			return nil, err
		}
		rs.Instructions = append(rs.Instructions, vm.Instruction{
			Op:  vm.Deletion,
			Arg: scp,
		})
	case plan.Query_INSERT:
		insertNode := qry.Nodes[qry.Steps[0]]
		// insert plans must not be cached: they mutate state
		insertNode.NotCacheable = true
		arg, err := constructInsert(insertNode, c.e, c.proc)
		if err != nil {
			return nil, err
		}
		// Same payload-size heuristic as compileQuery: large inserts and
		// LOAD statements take the distributed write path.
		nodeStats := qry.Nodes[insertNode.Children[0]].Stats
		if nodeStats.GetCost()*float64(SingleLineSizeEstimate) > float64(DistributedThreshold) || qry.LoadTag {
			// use distributed-insert
			arg.IsRemote = true
			rs = c.newInsertMergeScope(arg, ss)
			rs.Magic = MergeInsert
			// MergeBlock must stay the last instruction: Run reads the
			// affected-row count from it after MergeRun.
			rs.Instructions = append(rs.Instructions, vm.Instruction{
				Op: vm.MergeBlock,
				Arg: &mergeblock.Argument{
					Tbl:         arg.InsertCtx.Source,
					Unique_tbls: arg.InsertCtx.UniqueSource,
				},
			})
		} else {
			rs = c.newMergeScope(ss)
			rs.Magic = Insert
			c.SetAnalyzeCurrent([]*Scope{rs}, c.anal.curr)
			rs.Instructions = append(rs.Instructions, vm.Instruction{
				Op:  vm.Insert,
				Arg: arg,
			})
		}
	case plan.Query_UPDATE:
		scp, err := constructUpdate(qry.Nodes[qry.Steps[0]], c.e, c.proc)
		if err != nil {
			return nil, err
		}
		rs = c.newMergeScope(ss)
		updateScopesLastFlag([]*Scope{rs})
		rs.Magic = Update
		c.SetAnalyzeCurrent([]*Scope{rs}, c.anal.curr)
		rs.Instructions = append(rs.Instructions, vm.Instruction{
			Op:  vm.Update,
			Arg: scp,
		})
	default:
		// plain query: stream results to the session via the fill callback
		rs = c.newMergeScope(ss)
		updateScopesLastFlag([]*Scope{rs})
		c.SetAnalyzeCurrent([]*Scope{rs}, c.anal.curr)
		rs.Instructions = append(rs.Instructions, vm.Instruction{
			Op: vm.Output,
			Arg: &output.Argument{
				Data: c.u,
				Func: c.fill,
			},
		})
	}
	return rs, nil
}
   391  
// constructValueScanBatch builds the constant batch that feeds a VALUE_SCAN
// scope. With no table definition (e.g. `select 1, 2`) it returns a single
// placeholder int64 column with one row; otherwise it materializes the
// node's rowset data column by column.
func constructValueScanBatch(ctx context.Context, proc *process.Process, node *plan.Node) (*batch.Batch, error) {
	if node == nil || node.TableDef == nil { // like : select 1, 2
		bat := batch.NewWithSize(1)
		bat.Vecs[0] = vector.NewConst(types.Type{Oid: types.T_int64}, 1)
		bat.Vecs[0].Col = make([]int64, 1)
		bat.InitZsOne(1)
		return bat, nil
	}
	// select * from (values row(1,1), row(2,2), row(3,3)) a;
	tableDef := node.TableDef
	colCount := len(tableDef.Cols)
	// NOTE(review): assumes node.RowsetData is non-nil and has at least one
	// column whenever TableDef is set — confirm with the planner.
	colsData := node.RowsetData.Cols
	rowCount := len(colsData[0].Data)
	bat := batch.NewWithSize(colCount)
	for i := 0; i < colCount; i++ {
		vec, err := rowsetDataToVector(ctx, proc, colsData[i].Data)
		if err != nil {
			return nil, err
		}
		bat.Vecs[i] = vec
	}
	bat.SetZs(rowCount, proc.Mp())
	return bat, nil
}
   416  
// compilePlanScope recursively compiles one plan node (and its children, via
// ns) into a list of scopes. Around every child compilation the current
// analyze index is saved, pointed at the child, and restored afterwards via
// SetAnalyzeCurrent — this bookkeeping order matters for per-node stats.
func (c *Compile) compilePlanScope(ctx context.Context, n *plan.Node, ns []*plan.Node) ([]*Scope, error) {
	switch n.NodeType {
	case plan.Node_VALUE_SCAN:
		// leaf: a constant batch as data source
		ds := &Scope{Magic: Normal}
		ds.Proc = process.NewWithAnalyze(c.proc, c.ctx, 0, c.anal.Nodes())
		bat, err := constructValueScanBatch(ctx, c.proc, n)
		if err != nil {
			return nil, err
		}
		ds.DataSource = &Source{Bat: bat}
		return c.compileSort(n, c.compileProjection(n, []*Scope{ds})), nil
	case plan.Node_EXTERNAL_SCAN:
		// the node is deep-copied because compileExternScan mutates it
		node := plan2.DeepCopyNode(n)
		ss, err := c.compileExternScan(ctx, node)
		if err != nil {
			return nil, err
		}
		return c.compileSort(n, c.compileProjection(n, c.compileRestrict(node, ss))), nil
	case plan.Node_TABLE_SCAN:
		ss, err := c.compileTableScan(n)
		if err != nil {
			return nil, err
		}
		// RelationName
		return c.compileSort(n, c.compileProjection(n, c.compileRestrict(n, ss))), nil
	case plan.Node_FILTER:
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(ss, curr)
		return c.compileSort(n, c.compileProjection(n, c.compileRestrict(n, ss))), nil
	case plan.Node_PROJECT:
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(ss, curr)
		return c.compileSort(n, c.compileProjection(n, c.compileRestrict(n, ss))), nil
	case plan.Node_AGG:
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(ss, curr)
		// plain aggregation without group-by (or small-memory mode) uses the
		// simpler agg operator; otherwise a grouped aggregation is built
		if len(n.GroupBy) == 0 || !c.info.WithBigMem {
			ss = c.compileAgg(n, ss, ns)
		} else {
			ss = c.compileGroup(n, ss, ns)
		}
		return c.compileSort(n, c.compileProjection(n, c.compileRestrict(n, ss))), nil
	case plan.Node_JOIN:
		// joinType may ask to swap build/probe sides (needSwap)
		needSwap, joinTyp := joinType(ctx, n, ns)
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(ss, int(n.Children[1]))
		children, err := c.compilePlanScope(ctx, ns[n.Children[1]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(children, curr)
		if needSwap {
			return c.compileSort(n, c.compileJoin(ctx, n, ns[n.Children[0]], children, ss, joinTyp)), nil
		}
		return c.compileSort(n, c.compileJoin(ctx, n, ns[n.Children[1]], ss, children, joinTyp)), nil
	case plan.Node_SORT:
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(ss, curr)
		// sort first, then restrict/project on the sorted stream
		ss = c.compileSort(n, ss)
		return c.compileProjection(n, c.compileRestrict(n, ss)), nil
	case plan.Node_UNION:
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(ss, int(n.Children[1]))
		children, err := c.compilePlanScope(ctx, ns[n.Children[1]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(children, curr)
		return c.compileSort(n, c.compileUnion(n, ss, children, ns)), nil
	case plan.Node_MINUS, plan.Node_INTERSECT, plan.Node_INTERSECT_ALL:
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(ss, int(n.Children[1]))
		children, err := c.compilePlanScope(ctx, ns[n.Children[1]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(children, curr)
		return c.compileSort(n, c.compileMinusAndIntersect(n, ss, children, n.NodeType)), nil
	case plan.Node_UNION_ALL:
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(ss, int(n.Children[1]))
		children, err := c.compilePlanScope(ctx, ns[n.Children[1]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(children, curr)
		return c.compileSort(n, c.compileUnionAll(n, ss, children)), nil
	case plan.Node_DELETE:
		// a delete that can be answered by truncate produces no scopes;
		// the caller handles the truncate path
		if n.DeleteCtx.CanTruncate {
			return nil, nil
		}
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		return ss, nil
	case plan.Node_INSERT:
		// DML terminal operators are appended later in compileApQuery;
		// here only the child pipeline is built
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		return ss, nil
		// return c.compileProjection(n, c.compileRestrict(n, ss)), nil
	case plan.Node_UPDATE:
		ss, err := c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		return ss, nil
	case plan.Node_FUNCTION_SCAN:
		var (
			pre []*Scope
			err error
		)
		curr := c.anal.curr
		c.SetAnalyzeCurrent(nil, int(n.Children[0]))
		pre, err = c.compilePlanScope(ctx, ns[n.Children[0]], ns)
		if err != nil {
			return nil, err
		}
		c.SetAnalyzeCurrent(pre, curr)
		ss, err := c.compileTableFunction(n, pre)
		if err != nil {
			return nil, err
		}
		return c.compileSort(n, c.compileProjection(n, c.compileRestrict(n, ss))), nil
	default:
		return nil, moerr.NewNYI(ctx, fmt.Sprintf("query '%s'", n))
	}
}
   587  
   588  func (c *Compile) ConstructScope() *Scope {
   589  	ds := &Scope{Magic: Normal}
   590  	ds.Proc = process.NewWithAnalyze(c.proc, c.ctx, 0, c.anal.Nodes())
   591  	ds.Proc.LoadTag = true
   592  	bat := batch.NewWithSize(1)
   593  	{
   594  		bat.Vecs[0] = vector.NewConst(types.Type{Oid: types.T_int64}, 1)
   595  		bat.Vecs[0].Col = make([]int64, 1)
   596  		bat.InitZsOne(1)
   597  	}
   598  	ds.DataSource = &Source{Bat: bat}
   599  	return ds
   600  }
   601  
   602  func (c *Compile) compileExternScan(ctx context.Context, n *plan.Node) ([]*Scope, error) {
   603  	mcpu := c.cnList[0].Mcpu
   604  	param := &tree.ExternParam{}
   605  	err := json.Unmarshal([]byte(n.TableDef.Createsql), param)
   606  	if param.Local {
   607  		mcpu = 1
   608  	}
   609  	if err != nil {
   610  		return nil, err
   611  	}
   612  	if param.ScanType == tree.S3 {
   613  		if err := plan2.InitS3Param(param); err != nil {
   614  			return nil, err
   615  		}
   616  		if param.Parallel {
   617  			if mcpu > external.S3_PARALLEL_MAXNUM {
   618  				mcpu = external.S3_PARALLEL_MAXNUM
   619  			}
   620  		}
   621  	} else {
   622  		if err := plan2.InitInfileParam(param); err != nil {
   623  			return nil, err
   624  		}
   625  	}
   626  
   627  	if n.ObjRef != nil {
   628  		param.SysTable = external.IsSysTable(n.ObjRef.SchemaName, n.TableDef.Name)
   629  	}
   630  
   631  	param.FileService = c.proc.FileService
   632  	param.Ctx = c.ctx
   633  	var fileList []string
   634  	var fileSize []int64
   635  	if !param.Local {
   636  		if param.QueryResult {
   637  			fileList = strings.Split(param.Filepath, ",")
   638  			for i := range fileList {
   639  				fileList[i] = strings.TrimSpace(fileList[i])
   640  			}
   641  		} else {
   642  			fileList, fileSize, err = plan2.ReadDir(param)
   643  			if err != nil {
   644  				return nil, err
   645  			}
   646  		}
   647  		fileList, fileSize, err = external.FilterFileList(n, c.proc, fileList, fileSize)
   648  		if err != nil {
   649  			return nil, err
   650  		}
   651  		if param.LoadFile && len(fileList) == 0 {
   652  			return nil, moerr.NewInvalidInput(ctx, "the file does not exist in load flow")
   653  		}
   654  	} else {
   655  		fileList = []string{param.Filepath}
   656  	}
   657  
   658  	var fileOffset [][][2]int
   659  	for i := 0; i < len(fileList); i++ {
   660  		param.Filepath = fileList[i]
   661  		if param.Parallel {
   662  			arr, err := external.ReadFileOffset(param, c.proc, mcpu, fileSize[i])
   663  			fileOffset = append(fileOffset, arr)
   664  			if err != nil {
   665  				return nil, err
   666  			}
   667  		}
   668  	}
   669  
   670  	cnt := len(fileList) / mcpu
   671  	tag := len(fileList) % mcpu
   672  	index := 0
   673  	currentFirstFlag := c.anal.isFirst
   674  	ss := make([]*Scope, mcpu)
   675  	for i := 0; i < mcpu; i++ {
   676  		ss[i] = c.ConstructScope()
   677  		var fileListTmp []string
   678  		if i < tag {
   679  			fileListTmp = fileList[index : index+cnt+1]
   680  			index += cnt + 1
   681  		} else {
   682  			fileListTmp = fileList[index : index+cnt]
   683  			index += cnt
   684  		}
   685  		offset := make([][2]int, 0)
   686  		for j := 0; j < len(fileOffset); j++ {
   687  			offset = append(offset, [2]int{fileOffset[j][i][0], fileOffset[j][i][1]})
   688  		}
   689  		ss[i].appendInstruction(vm.Instruction{
   690  			Op:      vm.External,
   691  			Idx:     c.anal.curr,
   692  			IsFirst: currentFirstFlag,
   693  			Arg:     constructExternal(n, param, c.ctx, fileListTmp, fileSize, offset),
   694  		})
   695  		if param.Parallel {
   696  			ss[i].Instructions[0].Arg.(*external.Argument).Es.FileList = fileList
   697  		}
   698  	}
   699  	c.anal.isFirst = false
   700  	return ss, nil
   701  }
   702  
   703  func (c *Compile) compileTableFunction(n *plan.Node, ss []*Scope) ([]*Scope, error) {
   704  	currentFirstFlag := c.anal.isFirst
   705  	for i := range ss {
   706  		ss[i].appendInstruction(vm.Instruction{
   707  			Op:      vm.TableFunction,
   708  			Idx:     c.anal.curr,
   709  			IsFirst: currentFirstFlag,
   710  			Arg:     constructTableFunction(n, c.ctx, n.TableDef.TblFunc.Name),
   711  		})
   712  	}
   713  	c.anal.isFirst = false
   714  
   715  	return ss, nil
   716  }
   717  
   718  func (c *Compile) compileTableScan(n *plan.Node) ([]*Scope, error) {
   719  	nodes, err := c.generateNodes(n)
   720  	if err != nil {
   721  		return nil, err
   722  	}
   723  	ss := make([]*Scope, 0, len(nodes))
   724  	for i := range nodes {
   725  		ss = append(ss, c.compileTableScanWithNode(n, nodes[i]))
   726  	}
   727  	return ss, nil
   728  }
   729  
// compileTableScanWithNode builds one Remote scope that scans n's table on
// the given engine node. It re-reads the table definition from the engine
// (falling back to the temporary-table engine when the relation is not found
// in the target database) so the scan sees the current column layout.
// NOTE: engine/relation lookup failures panic here; Compile's deferred
// recover converts them into errors.
func (c *Compile) compileTableScanWithNode(n *plan.Node, node engine.Node) *Scope {
	var s *Scope
	var tblDef *plan.TableDef
	var ts timestamp.Timestamp
	var db engine.Database
	var rel engine.Relation
	var err error

	// scan attribute list = the plan's column names, in order
	attrs := make([]string, len(n.TableDef.Cols))
	for j, col := range n.TableDef.Cols {
		attrs[j] = col.Name
	}
	// snapshot timestamp of the current transaction, when one is active
	if c.proc != nil && c.proc.TxnOperator != nil {
		ts = c.proc.TxnOperator.Txn().SnapshotTS
	}
	{
		var cols []*plan.ColDef
		ctx := c.ctx
		// cluster tables are always resolved under the system account
		if util.TableIsClusterTable(n.TableDef.GetTableType()) {
			ctx = context.WithValue(ctx, defines.TenantIDKey{}, catalog.System_Account)
		}
		db, err = c.e.Database(ctx, n.ObjRef.SchemaName, c.proc.TxnOperator)
		if err != nil {
			panic(err)
		}
		rel, err = db.Relation(ctx, n.TableDef.Name)
		if err != nil {
			// not found in the schema: retry as a temporary table
			var e error // avoid contamination of error messages
			db, e = c.e.Database(c.ctx, defines.TEMPORARY_DBNAME, c.proc.TxnOperator)
			if e != nil {
				panic(e)
			}
			rel, e = db.Relation(c.ctx, engine.GetTempTableName(n.ObjRef.SchemaName, n.TableDef.Name))
			if e != nil {
				panic(e)
			}
		}
		// rebuild the column definitions from the engine's attribute defs
		// (note: this `err` shadows the outer one, checked immediately)
		defs, err := rel.TableDefs(ctx)
		if err != nil {
			panic(err)
		}
		i := int32(0)
		name2index := make(map[string]int32)
		for _, def := range defs {
			if attr, ok := def.(*engine.AttributeDef); ok {
				name2index[attr.Attr.Name] = i
				cols = append(cols, &plan.ColDef{
					Name: attr.Attr.Name,
					Typ: &plan.Type{
						Id:        int32(attr.Attr.Type.Oid),
						Width:     attr.Attr.Type.Width,
						Size:      attr.Attr.Type.Size,
						Precision: attr.Attr.Type.Precision,
						Scale:     attr.Attr.Type.Scale,
						AutoIncr:  attr.Attr.AutoIncrement,
					},
					Primary:   attr.Attr.Primary,
					Default:   attr.Attr.Default,
					OnUpdate:  attr.Attr.OnUpdate,
					Comment:   attr.Attr.Comment,
					ClusterBy: attr.Attr.ClusterBy,
				})
				i++
			}
		}
		tblDef = &plan.TableDef{
			Cols:          cols,
			Name2ColIndex: name2index,
			Name:          n.TableDef.Name,
			TableType:     n.TableDef.GetTableType(),
		}
	}
	s = &Scope{
		Magic:    Remote,
		NodeInfo: node,
		DataSource: &Source{
			Timestamp:    ts,
			Attributes:   attrs,
			TableDef:     tblDef,
			RelationName: n.TableDef.Name,
			SchemaName:   n.ObjRef.SchemaName,
			Expr:         colexec.RewriteFilterExprList(n.FilterList),
		},
	}
	s.Proc = process.NewWithAnalyze(c.proc, c.ctx, 0, c.anal.Nodes())
	return s
}
   817  
   818  func (c *Compile) compileRestrict(n *plan.Node, ss []*Scope) []*Scope {
   819  	if len(n.FilterList) == 0 {
   820  		return ss
   821  	}
   822  	currentFirstFlag := c.anal.isFirst
   823  	for i := range ss {
   824  		ss[i].appendInstruction(vm.Instruction{
   825  			Op:      vm.Restrict,
   826  			Idx:     c.anal.curr,
   827  			IsFirst: currentFirstFlag,
   828  			Arg:     constructRestrict(n),
   829  		})
   830  	}
   831  	c.anal.isFirst = false
   832  	return ss
   833  }
   834  
   835  func (c *Compile) compileProjection(n *plan.Node, ss []*Scope) []*Scope {
   836  	currentFirstFlag := c.anal.isFirst
   837  	for i := range ss {
   838  		ss[i].appendInstruction(vm.Instruction{
   839  			Op:      vm.Projection,
   840  			Idx:     c.anal.curr,
   841  			IsFirst: currentFirstFlag,
   842  			Arg:     constructProjection(n),
   843  		})
   844  	}
   845  	c.anal.isFirst = false
   846  	return ss
   847  }
   848  
// compileUnion implements UNION (with duplicate elimination): both input
// pipelines are merged and fed into grouping scopes that group by the full
// projection list, which deduplicates the combined rows.
func (c *Compile) compileUnion(n *plan.Node, ss []*Scope, children []*Scope, ns []*plan.Node) []*Scope {
	ss = append(ss, children...)
	rs := c.newScopeList(1, int(n.Stats.BlockNum))
	// synthetic group-by node: group on every projected column
	gn := new(plan.Node)
	gn.GroupBy = make([]*plan.Expr, len(n.ProjectList))
	copy(gn.GroupBy, n.ProjectList)
	for i := range rs {
		// each result scope gets its own copy of the merged inputs,
		// connected into the scope's first merge receiver
		ch := c.newMergeScope(dupScopeList(ss))
		ch.appendInstruction(vm.Instruction{
			Op: vm.Connector,
			Arg: &connector.Argument{
				Reg: rs[i].Proc.Reg.MergeReceivers[0],
			},
		})
		ch.IsEnd = true
		rs[i].PreScopes = []*Scope{ch}
		rs[i].Instructions = append(rs[i].Instructions, vm.Instruction{
			Op:  vm.Group,
			Idx: c.anal.curr,
			Arg: constructGroup(c.ctx, gn, n, i, len(rs), true, c.proc),
		})
	}
	return rs
}
   873  
   874  func (c *Compile) compileMinusAndIntersect(n *plan.Node, ss []*Scope, children []*Scope, nodeType plan.Node_NodeType) []*Scope {
   875  	rs := c.newJoinScopeListWithBucket(c.newScopeList(2, int(n.Stats.BlockNum)), ss, children)
   876  	switch nodeType {
   877  	case plan.Node_MINUS:
   878  		for i := range rs {
   879  			rs[i].Instructions[0] = vm.Instruction{
   880  				Op:  vm.Minus,
   881  				Idx: c.anal.curr,
   882  				Arg: constructMinus(n, c.proc, i, len(rs)),
   883  			}
   884  		}
   885  	case plan.Node_INTERSECT:
   886  		for i := range rs {
   887  			rs[i].Instructions[0] = vm.Instruction{
   888  				Op:  vm.Intersect,
   889  				Idx: c.anal.curr,
   890  				Arg: constructIntersect(n, c.proc, i, len(rs)),
   891  			}
   892  		}
   893  	case plan.Node_INTERSECT_ALL:
   894  		for i := range rs {
   895  			rs[i].Instructions[0] = vm.Instruction{
   896  				Op:  vm.IntersectAll,
   897  				Idx: c.anal.curr,
   898  				Arg: constructIntersectAll(n, c.proc, i, len(rs)),
   899  			}
   900  		}
   901  
   902  	}
   903  	return rs
   904  }
   905  
// compileUnionAll compiles a UNION ALL node by merging the scopes of both
// inputs into a single scope; no duplicate elimination is needed.
func (c *Compile) compileUnionAll(n *plan.Node, ss []*Scope, children []*Scope) []*Scope {
	rs := c.newMergeScope(append(ss, children...))
	rs.Instructions[0].Idx = c.anal.curr
	return []*Scope{rs}
}
   911  
// compileJoin builds the execution scopes for a join node. The build side
// (children) is broadcast to one join scope per probe-side scope; the
// concrete operator depends on the join type and on whether the condition
// is an equi-join (hash-based operator) or not (nested-loop "Loop*"
// operator).
func (c *Compile) compileJoin(ctx context.Context, n, right *plan.Node, ss []*Scope, children []*Scope, joinTyp plan.Node_JoinFlag) []*Scope {
	var rs []*Scope

	isEq := isEquiJoin(n.OnList)
	// Column types of the build side, required by the join constructors.
	typs := make([]types.Type, len(right.ProjectList))
	for i, expr := range right.ProjectList {
		typs[i] = dupType(expr.Typ)
	}
	switch joinTyp {
	case plan.Node_INNER:
		rs = c.newBroadcastJoinScopeList(ss, children)
		if len(n.OnList) == 0 {
			// No join condition: cartesian product.
			for i := range rs {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.Product,
					Idx: c.anal.curr,
					Arg: constructProduct(n, typs, c.proc),
				})
			}
		} else {
			for i := range rs {
				if isEq {
					rs[i].appendInstruction(vm.Instruction{
						Op:  vm.Join,
						Idx: c.anal.curr,
						Arg: constructJoin(n, typs, c.proc),
					})
				} else {
					rs[i].appendInstruction(vm.Instruction{
						Op:  vm.LoopJoin,
						Idx: c.anal.curr,
						Arg: constructLoopJoin(n, typs, c.proc),
					})
				}
			}
		}
	case plan.Node_SEMI:
		rs = c.newBroadcastJoinScopeList(ss, children)
		for i := range rs {
			if isEq {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.Semi,
					Idx: c.anal.curr,
					Arg: constructSemi(n, typs, c.proc),
				})
			} else {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.LoopSemi,
					Idx: c.anal.curr,
					Arg: constructLoopSemi(n, typs, c.proc),
				})
			}
		}
	case plan.Node_LEFT:
		rs = c.newBroadcastJoinScopeList(ss, children)
		for i := range rs {
			if isEq {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.Left,
					Idx: c.anal.curr,
					Arg: constructLeft(n, typs, c.proc),
				})
			} else {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.LoopLeft,
					Idx: c.anal.curr,
					Arg: constructLoopLeft(n, typs, c.proc),
				})
			}
		}
	case plan.Node_SINGLE:
		rs = c.newBroadcastJoinScopeList(ss, children)
		for i := range rs {
			if isEq {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.Single,
					Idx: c.anal.curr,
					Arg: constructSingle(n, typs, c.proc),
				})
			} else {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.LoopSingle,
					Idx: c.anal.curr,
					Arg: constructLoopSingle(n, typs, c.proc),
				})
			}
		}
	case plan.Node_ANTI:
		rs = c.newBroadcastJoinScopeList(ss, children)
		// The hash Anti operator handles only a single equi-condition;
		// anything else falls back to the nested-loop variant.
		_, conds := extraJoinConditions(n.OnList)
		for i := range rs {
			if isEq && len(conds) == 1 {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.Anti,
					Idx: c.anal.curr,
					Arg: constructAnti(n, typs, c.proc),
				})
			} else {
				rs[i].appendInstruction(vm.Instruction{
					Op:  vm.LoopAnti,
					Idx: c.anal.curr,
					Arg: constructLoopAnti(n, typs, c.proc),
				})
			}
		}
	case plan.Node_MARK:
		rs = c.newBroadcastJoinScopeList(ss, children)
		for i := range rs {
			// The hash-based Mark operator is disabled (commented out
			// below), so mark joins always use LoopMark for now.
			//if isEq {
			//	rs[i].appendInstruction(vm.Instruction{
			//		Op:  vm.Mark,
			//		Idx: c.anal.curr,
			//		Arg: constructMark(n, typs, c.proc),
			//	})
			//} else {
			rs[i].appendInstruction(vm.Instruction{
				Op:  vm.LoopMark,
				Idx: c.anal.curr,
				Arg: constructLoopMark(n, typs, c.proc),
			})
			//}
		}
	default:
		panic(moerr.NewNYI(ctx, fmt.Sprintf("join typ '%v'", n.JoinType)))
	}
	return rs
}
  1039  
// compileSort compiles the limit/offset/order-by portion of a node by
// dispatching on which of the three clauses are present. Limit and offset
// expressions are constant-folded via EvalExpr; evaluation failures panic
// (plan errors are treated as programmer errors here).
func (c *Compile) compileSort(n *plan.Node, ss []*Scope) []*Scope {
	switch {
	case n.Limit != nil && n.Offset == nil && len(n.OrderBy) > 0: // top
		vec, err := colexec.EvalExpr(constBat, c.proc, n.Limit)
		if err != nil {
			panic(err)
		}
		defer vec.Free(c.proc.Mp())
		return c.compileTop(n, vec.Col.([]int64)[0], ss)
	case n.Limit == nil && n.Offset == nil && len(n.OrderBy) > 0: // order by only
		return c.compileOrder(n, ss)
	case n.Limit != nil && n.Offset != nil && len(n.OrderBy) > 0: // order by with limit and offset
		vec1, err := colexec.EvalExpr(constBat, c.proc, n.Limit)
		if err != nil {
			panic(err)
		}
		defer vec1.Free(c.proc.Mp())
		vec2, err := colexec.EvalExpr(constBat, c.proc, n.Offset)
		if err != nil {
			panic(err)
		}
		defer vec2.Free(c.proc.Mp())
		limit, offset := vec1.Col.([]int64)[0], vec2.Col.([]int64)[0]
		topN := limit + offset
		if topN <= 8192*2 {
			// if n is small, convert `order by col limit m offset n` to `top m+n offset n`
			return c.compileOffset(n, c.compileTop(n, topN, ss))
		}
		return c.compileLimit(n, c.compileOffset(n, c.compileOrder(n, ss)))
	case n.Limit == nil && n.Offset != nil && len(n.OrderBy) > 0: // order and offset
		return c.compileOffset(n, c.compileOrder(n, ss))
	case n.Limit != nil && n.Offset == nil && len(n.OrderBy) == 0: // limit
		return c.compileLimit(n, ss)
	case n.Limit == nil && n.Offset != nil && len(n.OrderBy) == 0: // offset
		return c.compileOffset(n, ss)
	case n.Limit != nil && n.Offset != nil && len(n.OrderBy) == 0: // limit and offset
		return c.compileLimit(n, c.compileOffset(n, ss))
	default:
		// Nothing to do: no limit, no offset, no order-by.
		return ss
	}
}
  1081  
  1082  func containBrokenNode(s *Scope) bool {
  1083  	for i := range s.Instructions {
  1084  		if s.Instructions[i].IsBrokenNode() {
  1085  			return true
  1086  		}
  1087  	}
  1088  	return false
  1089  }
  1090  
// compileTop appends a per-pipeline Top operator (keeping the best topN rows
// in each scope), then merges all scopes through a MergeTop that yields the
// final topN rows.
func (c *Compile) compileTop(n *plan.Node, topN int64, ss []*Scope) []*Scope {
	// use topN TO make scope.
	currentFirstFlag := c.anal.isFirst
	for i := range ss {
		c.anal.isFirst = currentFirstFlag
		// A scope holding a pipeline-breaking operator is isolated behind a
		// merge before the blocking Top is appended.
		if containBrokenNode(ss[i]) {
			ss[i] = c.newMergeScope([]*Scope{ss[i]})
		}
		ss[i].appendInstruction(vm.Instruction{
			Op:      vm.Top,
			Idx:     c.anal.curr,
			IsFirst: c.anal.isFirst,
			Arg:     constructTop(n, topN),
		})
	}
	c.anal.isFirst = false

	// Replace the merge scope's leading Merge with MergeTop to combine the
	// partial top-N results.
	rs := c.newMergeScope(ss)
	rs.Instructions[0] = vm.Instruction{
		Op:  vm.MergeTop,
		Idx: c.anal.curr,
		Arg: constructMergeTop(n, topN),
	}
	return []*Scope{rs}
}
  1116  
// compileOrder appends a per-pipeline Order operator (sorting each scope's
// output), then merges all scopes through a MergeOrder that produces the
// globally sorted stream.
func (c *Compile) compileOrder(n *plan.Node, ss []*Scope) []*Scope {
	currentFirstFlag := c.anal.isFirst
	for i := range ss {
		c.anal.isFirst = currentFirstFlag
		// Isolate pipeline-breaking scopes behind a merge first.
		if containBrokenNode(ss[i]) {
			ss[i] = c.newMergeScope([]*Scope{ss[i]})
		}
		ss[i].appendInstruction(vm.Instruction{
			Op:      vm.Order,
			Idx:     c.anal.curr,
			IsFirst: c.anal.isFirst,
			Arg:     constructOrder(n, c.proc),
		})
	}
	c.anal.isFirst = false

	rs := c.newMergeScope(ss)
	rs.Instructions[0] = vm.Instruction{
		Op:  vm.MergeOrder,
		Idx: c.anal.curr,
		Arg: constructMergeOrder(n, c.proc),
	}
	return []*Scope{rs}
}
  1141  
// compileOffset merges all scopes and applies the offset once, after the
// merge (an offset cannot be pushed into the parallel pipelines, unlike
// limit/top).
func (c *Compile) compileOffset(n *plan.Node, ss []*Scope) []*Scope {
	currentFirstFlag := c.anal.isFirst
	for i := range ss {
		// NOTE(review): unlike compileTop/compileLimit, isFirst is restored
		// only inside this branch; looks intentional since no per-scope
		// operator is appended here — confirm.
		if containBrokenNode(ss[i]) {
			c.anal.isFirst = currentFirstFlag
			ss[i] = c.newMergeScope([]*Scope{ss[i]})
		}
	}

	rs := c.newMergeScope(ss)
	rs.Instructions[0] = vm.Instruction{
		Op:  vm.MergeOffset,
		Idx: c.anal.curr,
		Arg: constructMergeOffset(n, c.proc),
	}
	return []*Scope{rs}
}
  1159  
// compileLimit appends a per-pipeline Limit operator (early-terminating each
// scope at the limit), then merges all scopes through a MergeLimit that
// enforces the final row count.
func (c *Compile) compileLimit(n *plan.Node, ss []*Scope) []*Scope {
	currentFirstFlag := c.anal.isFirst
	for i := range ss {
		c.anal.isFirst = currentFirstFlag
		// Isolate pipeline-breaking scopes behind a merge first.
		if containBrokenNode(ss[i]) {
			ss[i] = c.newMergeScope([]*Scope{ss[i]})
		}
		ss[i].appendInstruction(vm.Instruction{
			Op:      vm.Limit,
			Idx:     c.anal.curr,
			IsFirst: c.anal.isFirst,
			Arg:     constructLimit(n, c.proc),
		})
	}
	c.anal.isFirst = false

	rs := c.newMergeScope(ss)
	rs.Instructions[0] = vm.Instruction{
		Op:  vm.MergeLimit,
		Idx: c.anal.curr,
		Arg: constructMergeLimit(n, c.proc),
	}
	return []*Scope{rs}
}
  1184  
// compileAgg compiles an aggregation without distributed grouping: each
// scope computes partial aggregates with a local Group operator, and a final
// MergeGroup combines the partial states.
func (c *Compile) compileAgg(n *plan.Node, ss []*Scope, ns []*plan.Node) []*Scope {
	currentFirstFlag := c.anal.isFirst
	for i := range ss {
		c.anal.isFirst = currentFirstFlag
		// Isolate pipeline-breaking scopes behind a merge first.
		if containBrokenNode(ss[i]) {
			ss[i] = c.newMergeScope([]*Scope{ss[i]})
		}
		ss[i].appendInstruction(vm.Instruction{
			Op:      vm.Group,
			Idx:     c.anal.curr,
			IsFirst: c.anal.isFirst,
			Arg:     constructGroup(c.ctx, n, ns[n.Children[0]], 0, 0, false, c.proc),
		})
	}
	c.anal.isFirst = false

	rs := c.newMergeScope(ss)
	rs.Instructions[0] = vm.Instruction{
		Op:  vm.MergeGroup,
		Idx: c.anal.curr,
		Arg: constructMergeGroup(n, true),
	}
	return []*Scope{rs}
}
  1209  
// compileGroup compiles a parallel hash aggregation: input scopes dispatch
// their batches (hash-partitioned) to a fresh list of group scopes, each of
// which aggregates its partition, and everything is merged at the end.
func (c *Compile) compileGroup(n *plan.Node, ss []*Scope, ns []*plan.Node) []*Scope {
	currentIsFirst := c.anal.isFirst
	c.anal.isFirst = false
	rs := c.newScopeList(validScopeCount(ss), int(n.Stats.BlockNum))
	j := 0
	for i := range ss {
		if containBrokenNode(ss[i]) {
			// Preserve the IsEnd flag across the isolating merge.
			isEnd := ss[i].IsEnd
			ss[i] = c.newMergeScope([]*Scope{ss[i]})
			ss[i].IsEnd = isEnd
		}
		if !ss[i].IsEnd {
			// Each live input scope fans its batches out to receiver j of
			// every group scope, then terminates.
			ss[i].appendInstruction(vm.Instruction{
				Op:  vm.Dispatch,
				Arg: constructDispatchLocal(true, extraRegisters(rs, j)),
			})
			j++
			ss[i].IsEnd = true
		}
	}

	for i := range rs {
		rs[i].Instructions = append(rs[i].Instructions, vm.Instruction{
			Op:      vm.Group,
			Idx:     c.anal.curr,
			IsFirst: currentIsFirst,
			Arg:     constructGroup(c.ctx, n, ns[n.Children[0]], i, len(rs), true, c.proc),
		})
	}
	// The ended input scopes are kept as PreScopes so they still run.
	return []*Scope{c.newMergeScope(append(rs, ss...))}
}
  1241  
  1242  func (c *Compile) newInsertMergeScope(arg *insert.Argument, ss []*Scope) *Scope {
  1243  	ss2 := make([]*Scope, 0, len(ss))
  1244  	for _, s := range ss {
  1245  		if s.IsEnd {
  1246  			continue
  1247  		}
  1248  		ss2 = append(ss2, s)
  1249  	}
  1250  	insert := &vm.Instruction{
  1251  		Op:  vm.Insert,
  1252  		Arg: arg,
  1253  	}
  1254  	for i := range ss2 {
  1255  		ss2[i].Instructions = append(ss2[i].Instructions, dupInstruction(insert, nil))
  1256  	}
  1257  	return c.newMergeScope(ss2)
  1258  }
  1259  
// newMergeScope creates a Merge scope that consumes the output of every
// still-running scope in ss: each non-ended scope gets a Connector wired to
// one of the merge scope's receivers, and becomes a PreScope of the result.
func (c *Compile) newMergeScope(ss []*Scope) *Scope {
	rs := &Scope{
		PreScopes: ss,
		Magic:     Merge,
	}
	// Only scopes that still produce data need a merge receiver.
	cnt := 0
	for _, s := range ss {
		if s.IsEnd {
			continue
		}
		cnt++
	}
	rs.Proc = process.NewWithAnalyze(c.proc, c.ctx, cnt, c.anal.Nodes())
	// Propagate the load tag from the first child, if any.
	if len(ss) > 0 {
		rs.Proc.LoadTag = ss[0].Proc.LoadTag
	}
	rs.Instructions = append(rs.Instructions, vm.Instruction{
		Op:      vm.Merge,
		Idx:     c.anal.curr,
		IsFirst: c.anal.isFirst,
		Arg:     &merge.Argument{},
	})
	c.anal.isFirst = false

	// Wire each live child to its own receiver, in order.
	j := 0
	for i := range ss {
		if !ss[i].IsEnd {
			ss[i].appendInstruction(vm.Instruction{
				Op: vm.Connector,
				Arg: &connector.Argument{
					Reg: rs.Proc.Reg.MergeReceivers[j],
				},
			})
			j++
		}
	}
	return rs
}
  1298  
// newScopeList creates parallel scopes across every CN in c.cnList; the
// per-CN degree of parallelism is derived from the CN's cpu count capped by
// the table's block count.
func (c *Compile) newScopeList(childrenCount int, blocks int) []*Scope {
	var ss []*Scope

	// Every CN's scopes share the same "first operator" flag.
	currentFirstFlag := c.anal.isFirst
	for _, n := range c.cnList {
		c.anal.isFirst = currentFirstFlag
		ss = append(ss, c.newScopeListWithNode(c.generateCPUNumber(n.Mcpu, blocks), childrenCount)...)
	}
	return ss
}
  1309  
// newScopeListWithNode creates mcpu parallel Remote scopes, each starting
// with a Merge operator and equipped with childrenCount merge receivers.
func (c *Compile) newScopeListWithNode(mcpu, childrenCount int) []*Scope {
	ss := make([]*Scope, mcpu)
	currentFirstFlag := c.anal.isFirst
	for i := range ss {
		ss[i] = new(Scope)
		ss[i].Magic = Remote
		ss[i].Proc = process.NewWithAnalyze(c.proc, c.ctx, childrenCount, c.anal.Nodes())
		ss[i].Instructions = append(ss[i].Instructions, vm.Instruction{
			Op:      vm.Merge,
			Idx:     c.anal.curr,
			IsFirst: currentFirstFlag,
			Arg:     &merge.Argument{},
		})
	}
	c.anal.isFirst = false
	return ss
}
  1327  
// newJoinScopeListWithBucket wires each bucket scope in rs with two merged
// inputs: a private duplicate of the probe side (ss) on receiver 0 and a
// private duplicate of the build side (children) on receiver 1.
func (c *Compile) newJoinScopeListWithBucket(rs, ss, children []*Scope) []*Scope {
	currentFirstFlag := c.anal.isFirst
	for i := range rs {
		// Restore the first-operator flag before each merge so both sides
		// of every bucket are tagged consistently.
		c.anal.isFirst = currentFirstFlag
		left := c.newMergeScope(dupScopeList(ss))

		c.anal.isFirst = currentFirstFlag
		right := c.newMergeScope(dupScopeList(children))

		rs[i].PreScopes = []*Scope{left, right}
		left.appendInstruction(vm.Instruction{
			Op: vm.Connector,
			Arg: &connector.Argument{
				Reg: rs[i].Proc.Reg.MergeReceivers[0],
			},
		})
		right.appendInstruction(vm.Instruction{
			Op: vm.Connector,
			Arg: &connector.Argument{
				Reg: rs[i].Proc.Reg.MergeReceivers[1],
			},
		})
		left.IsEnd = true
		right.IsEnd = true
	}
	return rs
}
  1355  
  1356  //func (c *Compile) newJoinScopeList(ss []*Scope, children []*Scope) []*Scope {
  1357  //rs := make([]*Scope, len(ss))
  1358  //// join's input will record in the left/right scope when JoinRun
  1359  //// so set it to false here.
  1360  //c.anal.isFirst = false
  1361  //for i := range ss {
  1362  //if ss[i].IsEnd {
  1363  //rs[i] = ss[i]
  1364  //continue
  1365  //}
  1366  //chp := c.newMergeScope(dupScopeList(children))
  1367  //rs[i] = new(Scope)
  1368  //rs[i].Magic = Remote
  1369  //rs[i].IsJoin = true
  1370  //rs[i].NodeInfo = ss[i].NodeInfo
  1371  //rs[i].PreScopes = []*Scope{ss[i], chp}
  1372  //rs[i].Proc = process.NewWithAnalyze(c.proc, c.ctx, 2, c.anal.Nodes())
  1373  //ss[i].appendInstruction(vm.Instruction{
  1374  //Op: vm.Connector,
  1375  //Arg: &connector.Argument{
  1376  //Reg: rs[i].Proc.Reg.MergeReceivers[0],
  1377  //},
  1378  //})
  1379  //chp.appendInstruction(vm.Instruction{
  1380  //Op: vm.Connector,
  1381  //Arg: &connector.Argument{
  1382  //Reg: rs[i].Proc.Reg.MergeReceivers[1],
  1383  //},
  1384  //})
  1385  //chp.IsEnd = true
  1386  //}
  1387  //return rs
  1388  //}
  1389  
  1390  func (c *Compile) newBroadcastJoinScopeList(ss []*Scope, children []*Scope) []*Scope {
  1391  	len := len(ss)
  1392  	rs := make([]*Scope, len)
  1393  	idx := 0
  1394  	for i := range ss {
  1395  		if ss[i].IsEnd {
  1396  			rs[i] = ss[i]
  1397  			continue
  1398  		}
  1399  		rs[i] = new(Scope)
  1400  		rs[i].Magic = Remote
  1401  		rs[i].IsJoin = true
  1402  		rs[i].NodeInfo = ss[i].NodeInfo
  1403  		if isCurrentCN(rs[i].NodeInfo.Addr, c.addr) {
  1404  			idx = i
  1405  		}
  1406  		rs[i].PreScopes = []*Scope{ss[i]}
  1407  		rs[i].Proc = process.NewWithAnalyze(c.proc, c.ctx, 2, c.anal.Nodes())
  1408  		ss[i].appendInstruction(vm.Instruction{
  1409  			Op: vm.Connector,
  1410  			Arg: &connector.Argument{
  1411  				Reg: rs[i].Proc.Reg.MergeReceivers[0],
  1412  			},
  1413  		})
  1414  	}
  1415  
  1416  	mergeChildren := c.newMergeScope(children)
  1417  	mergeChildren.appendInstruction(vm.Instruction{
  1418  		Op:  vm.Dispatch,
  1419  		Arg: constructBroadcastJoinDispatch(1, rs, c.addr, mergeChildren.Proc),
  1420  	})
  1421  	rs[idx].PreScopes = append(rs[idx].PreScopes, mergeChildren)
  1422  
  1423  	return rs
  1424  }
  1425  
// newLeftScope builds the probe-side feeder for join scope s: it merges the
// batches arriving on s's receiver 0 and dispatches them to receiver 0 of
// every scope in ss.
func (c *Compile) newLeftScope(s *Scope, ss []*Scope) *Scope {
	rs := &Scope{
		Magic: Merge,
	}
	rs.appendInstruction(vm.Instruction{
		Op:      vm.Merge,
		Idx:     s.Instructions[0].Idx,
		IsFirst: true,
		Arg:     &merge.Argument{},
	})
	rs.appendInstruction(vm.Instruction{
		Op:  vm.Dispatch,
		Arg: constructDispatchLocal(false, extraRegisters(ss, 0)),
	})
	rs.IsEnd = true
	rs.Proc = process.NewWithAnalyze(s.Proc, c.ctx, 1, c.anal.Nodes())
	// Take over s's probe-side receiver as this scope's input.
	rs.Proc.Reg.MergeReceivers[0] = s.Proc.Reg.MergeReceivers[0]
	return rs
}
  1445  
// newRightScope builds the build-side feeder for join scope s: it runs
// HashBuild on the batches arriving on s's receiver 1 and dispatches the
// built hash table to receiver 1 of every scope in ss.
func (c *Compile) newRightScope(s *Scope, ss []*Scope) *Scope {
	rs := &Scope{
		Magic: Merge,
	}
	rs.appendInstruction(vm.Instruction{
		Op:      vm.HashBuild,
		Idx:     s.Instructions[0].Idx,
		IsFirst: true,
		Arg:     constructHashBuild(s.Instructions[0], c.proc),
	})
	rs.appendInstruction(vm.Instruction{
		Op:  vm.Dispatch,
		Arg: constructDispatchLocal(true, extraRegisters(ss, 1)),
	})
	rs.IsEnd = true
	rs.Proc = process.NewWithAnalyze(s.Proc, c.ctx, 1, c.anal.Nodes())
	// Take over s's build-side receiver as this scope's input.
	rs.Proc.Reg.MergeReceivers[0] = s.Proc.Reg.MergeReceivers[1]

	// Move the remote-receive registration for the build side (Idx == 1)
	// from s onto this scope, re-targeting it at receiver 0. Mutating the
	// slice while ranging is safe here because we break immediately.
	for i, u := range s.RemoteReceivRegInfos {
		if u.Idx == 1 {
			rs.RemoteReceivRegInfos = append(rs.RemoteReceivRegInfos, RemoteReceivRegInfo{
				Idx:      0,
				Uuid:     u.Uuid,
				FromAddr: u.FromAddr,
			})
			s.RemoteReceivRegInfos = append(s.RemoteReceivRegInfos[:i], s.RemoteReceivRegInfos[i+1:]...)
			break
		}
	}

	return rs
}
  1478  
// NumCPU returns the number of logical CPUs available on the current machine.
func (c *Compile) NumCPU() int {
	return runtime.NumCPU()
}
  1483  
  1484  func (c *Compile) generateCPUNumber(cpunum, blocks int) int {
  1485  	if blocks < cpunum {
  1486  		if blocks <= 0 {
  1487  			return 1
  1488  		}
  1489  		return blocks
  1490  	}
  1491  	if cpunum <= 0 {
  1492  		return 1
  1493  	}
  1494  	return cpunum
  1495  }
  1496  
// initAnalyze allocates one AnalyzeInfo per plan node and initializes the
// analyzer state, pointing it at the query's first step.
func (c *Compile) initAnalyze(qry *plan.Query) {
	anals := make([]*process.AnalyzeInfo, len(qry.Nodes))
	for i := range anals {
		anals[i] = new(process.AnalyzeInfo)
	}
	c.anal = &anaylze{
		qry:       qry,
		analInfos: anals,
		curr:      int(qry.Steps[0]),
	}
}
  1508  
// fillAnalyzeInfo copies the runtime counters gathered during execution back
// into the corresponding plan nodes' AnalyzeInfo. The counters are read
// atomically because operators update them concurrently while running.
func (c *Compile) fillAnalyzeInfo() {
	for i, anal := range c.anal.analInfos {
		if c.anal.qry.Nodes[i].AnalyzeInfo == nil {
			c.anal.qry.Nodes[i].AnalyzeInfo = new(plan.AnalyzeInfo)
		}
		c.anal.qry.Nodes[i].AnalyzeInfo.InputRows = atomic.LoadInt64(&anal.InputRows)
		c.anal.qry.Nodes[i].AnalyzeInfo.OutputRows = atomic.LoadInt64(&anal.OutputRows)
		c.anal.qry.Nodes[i].AnalyzeInfo.InputSize = atomic.LoadInt64(&anal.InputSize)
		c.anal.qry.Nodes[i].AnalyzeInfo.OutputSize = atomic.LoadInt64(&anal.OutputSize)
		c.anal.qry.Nodes[i].AnalyzeInfo.TimeConsumed = atomic.LoadInt64(&anal.TimeConsumed)
		c.anal.qry.Nodes[i].AnalyzeInfo.MemorySize = atomic.LoadInt64(&anal.MemorySize)
		c.anal.qry.Nodes[i].AnalyzeInfo.WaitTimeConsumed = atomic.LoadInt64(&anal.WaitTimeConsumed)
		c.anal.qry.Nodes[i].AnalyzeInfo.DiskIO = atomic.LoadInt64(&anal.DiskIO)
		c.anal.qry.Nodes[i].AnalyzeInfo.S3IOByte = atomic.LoadInt64(&anal.S3IOByte)
		c.anal.qry.Nodes[i].AnalyzeInfo.S3IOCount = atomic.LoadInt64(&anal.S3IOCount)
		c.anal.qry.Nodes[i].AnalyzeInfo.NetworkIO = atomic.LoadInt64(&anal.NetworkIO)
		c.anal.qry.Nodes[i].AnalyzeInfo.ScanTime = atomic.LoadInt64(&anal.ScanTime)
		c.anal.qry.Nodes[i].AnalyzeInfo.InsertTime = atomic.LoadInt64(&anal.InsertTime)
	}
}
  1529  
// generateNodes decides which engine nodes (CNs) will scan table n and
// assigns each its share of the table's block ranges: resolve the relation
// (falling back to the temporary-table engine), fetch the ranges surviving
// zonemap filtering, then distribute them evenly across c.cnList with the
// current CN's share accumulated into nodes[0].
func (c *Compile) generateNodes(n *plan.Node) (engine.Nodes, error) {
	var err error
	var db engine.Database
	var rel engine.Relation
	var ranges [][]byte
	var nodes engine.Nodes
	ctx := c.ctx
	// Cluster tables are stored under the system account regardless of the
	// current tenant.
	if util.TableIsClusterTable(n.TableDef.GetTableType()) {
		ctx = context.WithValue(ctx, defines.TenantIDKey{}, catalog.System_Account)
	}

	db, err = c.e.Database(ctx, n.ObjRef.SchemaName, c.proc.TxnOperator)
	if err != nil {
		return nil, err
	}
	rel, err = db.Relation(ctx, n.TableDef.Name)
	if err != nil {
		var e error // avoid contamination of error messages
		db, e = c.e.Database(ctx, defines.TEMPORARY_DBNAME, c.proc.TxnOperator)
		if e != nil {
			return nil, err
		}

		// Retry against the temporary-table database; on failure, report
		// the original lookup error rather than the fallback's.
		rel, e = db.Relation(ctx, engine.GetTempTableName(n.ObjRef.SchemaName, n.TableDef.Name))
		if e != nil {
			return nil, err
		}
		c.isTemporaryScan = true
	}
	if c.isTemporaryScan {
		c.isTemporaryScan = false
		// Temporary tables are local only: clear the CN addresses so the
		// scan is not shipped to remote nodes.
		for i := 0; i < len(c.cnList); i++ {
			c.cnList[i].Addr = ""
		}
	}
	ranges, err = rel.Ranges(ctx, plan2.HandleFiltersForZM(n.FilterList, c.proc))
	if err != nil {
		return nil, err
	}
	// No ranges at all: emit one data-less node per CN.
	if len(ranges) == 0 {
		nodes = make(engine.Nodes, len(c.cnList))
		for i, node := range c.cnList {
			nodes[i] = engine.Node{
				Rel:  rel,
				Id:   node.Id,
				Addr: node.Addr,
				Mcpu: c.generateCPUNumber(node.Mcpu, int(n.Stats.BlockNum)),
			}
		}
		return nodes, nil
	}
	// An empty first range appears to mark data that must be read on the
	// current CN (presumably in-memory/unflushed data — confirm with the
	// Ranges contract); it becomes nodes[0] and is stripped from the list.
	if len(ranges[0]) == 0 {
		if c.info.Typ == plan2.ExecTypeTP {
			nodes = append(nodes, engine.Node{
				Rel:  rel,
				Mcpu: 1,
			})
		} else {
			nodes = append(nodes, engine.Node{
				Rel:  rel,
				Mcpu: c.generateCPUNumber(runtime.NumCPU(), int(n.Stats.BlockNum)),
			})
		}
		nodes[0].Data = append(nodes[0].Data, ranges[:1]...)
		ranges = ranges[1:]
	}
	if len(ranges) == 0 {
		return nodes, nil
	}
	// Split the remaining ranges into len(c.cnList) chunks of `step` ranges
	// each; chunk j goes to CN j. Hosts are compared by address prefix
	// (before the first colon).
	step := (len(ranges) + len(c.cnList) - 1) / len(c.cnList)
	for i := 0; i < len(ranges); i += step {
		j := i / step
		if i+step >= len(ranges) {
			// NOTE(review): appending to nodes[0] assumes nodes is
			// non-empty, which only holds when ranges[0] was empty above —
			// confirm that a current-CN match cannot occur otherwise.
			if strings.Split(c.addr, ":")[0] == strings.Split(c.cnList[j].Addr, ":")[0] {
				nodes[0].Data = append(nodes[0].Data, ranges[i:]...)
			} else {
				nodes = append(nodes, engine.Node{
					Rel:  rel,
					Id:   c.cnList[j].Id,
					Addr: c.cnList[j].Addr,
					Mcpu: c.generateCPUNumber(c.cnList[j].Mcpu, int(n.Stats.BlockNum)),
					Data: ranges[i:],
				})
			}
		} else {
			if strings.Split(c.addr, ":")[0] == strings.Split(c.cnList[j].Addr, ":")[0] {
				nodes[0].Data = append(nodes[0].Data, ranges[i:i+step]...)
			} else {
				nodes = append(nodes, engine.Node{
					Rel:  rel,
					Id:   c.cnList[j].Id,
					Addr: c.cnList[j].Addr,
					Mcpu: c.generateCPUNumber(c.cnList[j].Mcpu, int(n.Stats.BlockNum)),
					Data: ranges[i : i+step],
				})
			}
		}
	}
	return nodes, nil
}
  1630  
// Nodes returns the per-plan-node analyze info slice collected for this query.
func (anal *anaylze) Nodes() []*process.AnalyzeInfo {
	return anal.analInfos
}
  1634  
  1635  func validScopeCount(ss []*Scope) int {
  1636  	var cnt int
  1637  
  1638  	for _, s := range ss {
  1639  		if s.IsEnd {
  1640  			continue
  1641  		}
  1642  		cnt++
  1643  	}
  1644  	return cnt
  1645  }
  1646  
  1647  func extraRegisters(ss []*Scope, i int) []*process.WaitRegister {
  1648  	regs := make([]*process.WaitRegister, 0, len(ss))
  1649  	for _, s := range ss {
  1650  		if s.IsEnd {
  1651  			continue
  1652  		}
  1653  		regs = append(regs, s.Proc.Reg.MergeReceivers[i])
  1654  	}
  1655  	return regs
  1656  }
  1657  
// joinType maps the plan's join type to the flag the compiler executes. The
// boolean return asks the caller to swap the two inputs first: a RIGHT join
// is the only case, executed as a LEFT join with swapped children —
// presumably; confirm at the call site. Unsupported types panic with NYI.
func joinType(ctx context.Context, n *plan.Node, ns []*plan.Node) (bool, plan.Node_JoinFlag) {
	switch n.JoinType {
	case plan.Node_INNER:
		return false, plan.Node_INNER
	case plan.Node_LEFT:
		return false, plan.Node_LEFT
	case plan.Node_SEMI:
		return false, plan.Node_SEMI
	case plan.Node_ANTI:
		return false, plan.Node_ANTI
	case plan.Node_RIGHT:
		return true, plan.Node_LEFT
	case plan.Node_SINGLE:
		return false, plan.Node_SINGLE
	case plan.Node_MARK:
		return false, plan.Node_MARK
	default:
		panic(moerr.NewNYI(ctx, fmt.Sprintf("join typ '%v'", n.JoinType)))
	}
}
  1678  
// dupType converts a plan-level type descriptor into the runtime types.Type
// used by the execution operators.
func dupType(typ *plan.Type) types.Type {
	return types.Type{
		Oid:       types.T(typ.Id),
		Size:      typ.Size,
		Width:     typ.Width,
		Scale:     typ.Scale,
		Precision: typ.Precision,
	}
}
  1688  
// SetAnalyzeCurrent marks the last instruction of each given scope as IsLast
// (so the analyzer knows where those pipelines' output is produced), then
// switches the analyzer to plan node nextId and resets the first-operator
// flag.
func (c *Compile) SetAnalyzeCurrent(updateScopes []*Scope, nextId int) {
	if updateScopes != nil {
		updateScopesLastFlag(updateScopes)
	}

	c.anal.curr = nextId
	c.anal.isFirst = true
}
  1699  
  1700  func updateScopesLastFlag(updateScopes []*Scope) {
  1701  	for _, s := range updateScopes {
  1702  		last := len(s.Instructions) - 1
  1703  		s.Instructions[last].IsLast = true
  1704  	}
  1705  }
  1706  
  1707  func isCurrentCN(addr string, currentCNAddr string) bool {
  1708  	return strings.Split(addr, ":")[0] == strings.Split(currentCNAddr, ":")[0]
  1709  }
  1710  
// rowsetDataToVector evaluates a column of row-literal expressions (one
// expression per row, e.g. one column of INSERT ... VALUES) into a single
// vector.
//
// The vector's type comes from the first expression whose type is not T_any;
// if all rows are untyped (all NULL), int32 is used as a fallback. Scalar
// NULL results are appended as nulls; every other result must be a constant
// of a supported type, otherwise an NYI error is returned.
func rowsetDataToVector(ctx context.Context, proc *process.Process, exprs []*plan.Expr) (*vector.Vector, error) {
	rowCount := len(exprs)
	if rowCount == 0 {
		return nil, moerr.NewInternalError(ctx, "rowsetData do not have rows")
	}
	var typ types.Type
	var vec *vector.Vector
	for _, e := range exprs {
		if e.Typ.Id != int32(types.T_any) {
			typ = plan2.MakeTypeByPlan2Type(e.Typ)
			vec = vector.New(typ)
			break
		}
	}
	if vec == nil {
		typ = types.T_int32.ToType()
		vec = vector.New(typ)
	}
	// Single-row dummy batch used as the evaluation context for constant
	// expressions; cleaned up before returning.
	bat := batch.NewWithSize(0)
	bat.Zs = []int64{1}
	defer bat.Clean(proc.Mp())

	for _, e := range exprs {
		tmp, err := colexec.EvalExpr(bat, proc, e)
		if err != nil {
			return nil, err
		}
		if tmp.IsScalarNull() {
			// Append a typed placeholder value flagged as null.
			vec.Append(vector.GetInitConstVal(typ), true, proc.Mp())
			continue
		}
		// Append the scalar result, unboxed according to the column type.
		switch typ.Oid {
		case types.T_bool:
			vec.Append(vector.MustTCols[bool](tmp)[0], false, proc.Mp())
		case types.T_int8:
			vec.Append(vector.MustTCols[int8](tmp)[0], false, proc.Mp())
		case types.T_int16:
			vec.Append(vector.MustTCols[int16](tmp)[0], false, proc.Mp())
		case types.T_int32:
			vec.Append(vector.MustTCols[int32](tmp)[0], false, proc.Mp())
		case types.T_int64:
			vec.Append(vector.MustTCols[int64](tmp)[0], false, proc.Mp())
		case types.T_uint8:
			vec.Append(vector.MustTCols[uint8](tmp)[0], false, proc.Mp())
		case types.T_uint16:
			vec.Append(vector.MustTCols[uint16](tmp)[0], false, proc.Mp())
		case types.T_uint32:
			vec.Append(vector.MustTCols[uint32](tmp)[0], false, proc.Mp())
		case types.T_uint64:
			vec.Append(vector.MustTCols[uint64](tmp)[0], false, proc.Mp())
		case types.T_float32:
			vec.Append(vector.MustTCols[float32](tmp)[0], false, proc.Mp())
		case types.T_float64:
			vec.Append(vector.MustTCols[float64](tmp)[0], false, proc.Mp())
		case types.T_char, types.T_varchar, types.T_json, types.T_blob, types.T_text:
			vec.Append(vector.MustBytesCols(tmp)[0], false, proc.Mp())
		case types.T_date:
			vec.Append(vector.MustTCols[types.Date](tmp)[0], false, proc.Mp())
		case types.T_datetime:
			vec.Append(vector.MustTCols[types.Datetime](tmp)[0], false, proc.Mp())
		case types.T_time:
			vec.Append(vector.MustTCols[types.Time](tmp)[0], false, proc.Mp())
		case types.T_timestamp:
			vec.Append(vector.MustTCols[types.Timestamp](tmp)[0], false, proc.Mp())
		case types.T_decimal64:
			vec.Append(vector.MustTCols[types.Decimal64](tmp)[0], false, proc.Mp())
		case types.T_decimal128:
			vec.Append(vector.MustTCols[types.Decimal128](tmp)[0], false, proc.Mp())
		case types.T_uuid:
			vec.Append(vector.MustTCols[types.Uuid](tmp)[0], false, proc.Mp())
		default:
			return nil, moerr.NewNYI(ctx, fmt.Sprintf("expression %v can not eval to constant and append to rowsetData", e))
		}
	}
	return vec, nil
}