github.com/matrixorigin/matrixone@v1.2.0/pkg/vm/engine/memoryengine/table_reader.go

// Copyright 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package memoryengine

import (
	"context"
	"encoding/binary"

	"github.com/matrixorigin/matrixone/pkg/catalog"
	"github.com/matrixorigin/matrixone/pkg/common/mpool"
	"github.com/matrixorigin/matrixone/pkg/container/batch"
	"github.com/matrixorigin/matrixone/pkg/logutil"
	"github.com/matrixorigin/matrixone/pkg/objectio"
	"github.com/matrixorigin/matrixone/pkg/pb/metadata"
	"github.com/matrixorigin/matrixone/pkg/pb/plan"
	"github.com/matrixorigin/matrixone/pkg/testutil"
	"github.com/matrixorigin/matrixone/pkg/txn/client"
	"github.com/matrixorigin/matrixone/pkg/vm/engine"
)

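// TableReader is an engine.Reader that drains table iterators previously
// opened on one or more TN shards, all within the same transaction.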
type TableReader struct {
	ctx         context.Context
	engine      *Engine
	txnOperator client.TxnOperator
	iterInfos   []IterInfo
}

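// IterInfo pairs a shard with the ID of the table iterator opened on it.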
type IterInfo struct {
	Shard  Shard
	IterID ID
}

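// NewReader opens table iterators on the target shards and distributes them
// across the requested number of parallel readers. The target shards come
// from the shard IDs encoded in bytes (see Ranges); if none are given,
// system tables read from any single shard and other tables read from all
// shards.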
func (t *Table) NewReader(ctx context.Context, parallel int, expr *plan.Expr, bytes []byte, _ bool) (readers []engine.Reader, err error) {
	readers = make([]engine.Reader, parallel)
	shardIDs := ShardIdSlice(bytes)

	// Decide which shards to read from.
	var shards []Shard
	if len(shardIDs) == 0 {
		switch t.id {

		case catalog.MO_DATABASE_ID,
			catalog.MO_TABLES_ID,
			catalog.MO_COLUMNS_ID:
			// system table: any single shard can serve it
			var err error
			shards, err = t.engine.anyShard()
			if err != nil {
				return nil, err
			}

		default:
			// no shard IDs given: read from all shards
			var err error
			shards, err = t.engine.allShards()
			if err != nil {
				return nil, err
			}
		}

	} else {
		// read only from the shards whose IDs were passed in
		idSet := make(map[uint64]bool)
		for i := 0; i < shardIDs.Len(); i++ {
			idSet[shardIDs.Get(i)] = true
		}
		for _, store := range getTNServices(t.engine.cluster) {
			for _, shard := range store.Shards {
				if !idSet[shard.ShardID] {
					continue
				}
				shards = append(shards, Shard{
					TNShardRecord: metadata.TNShardRecord{
						ShardID: shard.ShardID,
					},
					ReplicaID: shard.ReplicaID,
					Address:   store.TxnServiceAddress,
				})
			}
		}
	}

	// Open one table iterator per shard inside the transaction.
	resps, err := DoTxnRequest[NewTableIterResp](
		ctx,
		t.txnOperator,
		true,
		theseShards(shards),
		OpNewTableIter,
		&NewTableIterReq{
			TableID: t.id,
			Expr:    expr,
		},
	)
	if err != nil {
		return nil, err
	}

	// Distribute the opened iterators round-robin across the parallel readers.
	iterInfoSets := make([][]IterInfo, parallel)
	for i, resp := range resps {
		if resp.IterID == emptyID {
			continue
		}
		iterInfo := IterInfo{
			Shard:  shards[i],
			IterID: resp.IterID,
		}
		iterInfoSets[i%parallel] = append(iterInfoSets[i%parallel], iterInfo)
	}

	for i, set := range iterInfoSets {
		if len(set) == 0 {
			// no iterators assigned: an empty TableReader reads nothing
			readers[i] = new(TableReader)
			continue
		}
		reader := &TableReader{
			engine:      t.engine,
			txnOperator: t.txnOperator,
			ctx:         ctx,
			iterInfos:   set,
		}
		readers[i] = reader
	}

	return
}

var _ engine.Reader = new(TableReader)

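// Read returns the next batch from the reader's remaining iterators, or
// (nil, nil) once every iterator is exhausted.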
func (t *TableReader) Read(ctx context.Context, colNames []string, expr *plan.Expr, mp *mpool.MPool, _ engine.VectorPool) (*batch.Batch, error) {
	if t == nil {
		return nil, nil
	}

	for {

		if len(t.iterInfos) == 0 {
			// all iterators are exhausted
			return nil, nil
		}

		// Pull the next batch from the iterator at the head of the list.
		resps, err := DoTxnRequest[ReadResp](
			t.ctx,
			t.txnOperator,
			true,
			thisShard(t.iterInfos[0].Shard),
			OpRead,
			&ReadReq{
				IterID:   t.iterInfos[0].IterID,
				ColNames: colNames,
			},
		)
		if err != nil {
			return nil, err
		}

		resp := resps[0]

		if resp.Batch == nil {
			// this iterator has no more batches; move on to the next one
			t.iterInfos = t.iterInfos[1:]
			continue
		}

		logutil.Debug(testutil.OperatorCatchBatch("table reader", resp.Batch))
		return resp.Batch, nil
	}
}

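// SetFilterZM is a no-op; this reader does not use zone map filters.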
func (t *TableReader) SetFilterZM(objectio.ZoneMap) {
}

// GetOrderBy reports that this reader provides no particular ordering.
func (t *TableReader) GetOrderBy() []*plan.OrderBySpec {
	return nil
}

// SetOrderBy is a no-op; ordering hints are ignored by this reader.
func (t *TableReader) SetOrderBy([]*plan.OrderBySpec) {
}

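// Close closes every iterator still held by the reader, ignoring errors
// from individual shards.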
func (t *TableReader) Close() error {
	if t == nil {
		return nil
	}
	for _, info := range t.iterInfos {
		_, err := DoTxnRequest[CloseTableIterResp](
			t.ctx,
			t.txnOperator,
			true,
			thisShard(info.Shard),
			OpCloseTableIter,
			&CloseTableIterReq{
				IterID: info.IterID,
			},
		)
		// best effort: a failure to close one iterator does not fail Close
		_ = err // ignore error
	}
	return nil
}

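// GetEngineType reports that the table is backed by the in-memory engine.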
func (t *Table) GetEngineType() engine.EngineType {
	return engine.Memory
}

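// Ranges returns the IDs of all TN shards, encoded as a ShardIdSlice; the
// result is what NewReader later decodes to pick its target shards.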
func (t *Table) Ranges(_ context.Context, _ []*plan.Expr) (engine.Ranges, error) {
	// return encoded shard ids
	nodes := getTNServices(t.engine.cluster)
	shards := make(ShardIdSlice, 0, len(nodes)*8)
	for _, node := range nodes {
		for _, shard := range node.Shards {
			id := make([]byte, 8)
			binary.LittleEndian.PutUint64(id, shard.ShardID)
			shards = append(shards, id...)
		}
	}
	return &shards, nil
}

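// ShardIdSlice is an engine.Ranges implementation that stores shard IDs as
// a flat byte slice of 8-byte little-endian values.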
type ShardIdSlice []byte

var _ engine.Ranges = (*ShardIdSlice)(nil)

func (s *ShardIdSlice) GetBytes(i int) []byte {
	return (*s)[i*8 : (i+1)*8]
}

func (s *ShardIdSlice) Len() int {
	return len(*s) / 8
}

func (s *ShardIdSlice) Append(bs []byte) {
	*s = append(*s, bs...)
}

func (s *ShardIdSlice) Size() int {
	return len(*s)
}

func (s *ShardIdSlice) SetBytes(bs []byte) {
	*s = bs
}

func (s *ShardIdSlice) GetAllBytes() []byte {
	return *s
}

func (s *ShardIdSlice) Slice(i, j int) []byte {
	return (*s)[i*8 : j*8]
}

func (s *ShardIdSlice) Get(i int) uint64 {
	return binary.LittleEndian.Uint64(s.GetBytes(i))
}