github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/allegrosql/select_result.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package allegrosql

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/fidelpb/go-fidelpb"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb/einsteindbrpc"
	"github.com/whtcorpsinc/milevadb/ekv"
	"github.com/whtcorpsinc/milevadb/errno"
	"github.com/whtcorpsinc/milevadb/metrics"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/codec"
	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	"github.com/whtcorpsinc/milevadb/soliton/memory"
	"github.com/whtcorpsinc/milevadb/statistics"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
	"github.com/whtcorpsinc/milevadb/types"
	"go.uber.org/zap"
)

var (
	errQueryInterrupted = terror.ClassInterlockingDirectorate.NewStd(errno.ErrQueryInterrupted)
)

var (
	_ SelectResult = (*selectResult)(nil)
	_ SelectResult = (*streamResult)(nil)
)

// SelectResult is an iterator over interlock partial results.
type SelectResult interface {
	// Fetch fetches partial results from the client.
	Fetch(context.Context)
	// NextRaw gets the next raw result.
	NextRaw(context.Context) ([]byte, error)
	// Next reads the data into the given chunk.
	Next(context.Context, *chunk.Chunk) error
	// Close closes the iterator.
	Close() error
}
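
// The sketch below is only illustrative of how a SelectResult is typically driven:
// fetch once, drain it with Next until an empty chunk comes back, then Close. The
// caller-side names here (buildSelectResult, fieldTypes) are hypothetical stand-ins,
// not APIs defined in this package.
//
//	result, err := buildSelectResult(ctx) // hypothetical constructor
//	if err != nil {
//		return err
//	}
//	defer result.Close()
//	result.Fetch(ctx)
//	chk := chunk.NewChunkWithCapacity(fieldTypes, 1024)
//	for {
//		if err := result.Next(ctx, chk); err != nil {
//			return err
//		}
//		if chk.NumRows() == 0 {
//			break // all partial results have been consumed
//		}
//		// process the rows in chk ...
//	}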

type selectResult struct {
	label string
	resp  ekv.Response

	rowLen     int
	fieldTypes []*types.FieldType
	ctx        stochastikctx.Context

	selectResp             *fidelpb.SelectResponse
	selectRespSize         int64 // record the selectResp.Size() when it is initialized.
	respChkIdx             int
	respChunkCausetDecoder *chunk.CausetDecoder

	feedback     *statistics.QueryFeedback
	partialCount int64 // number of partial results.
	sqlType      string
	encodeType   fidelpb.EncodeType

	// copCausetIDs contains all CausetTasks' planIDs,
	// which help to collect CausetTasks' runtime stats.
	copCausetIDs []int
	rootCausetID int

	fetchDuration    time.Duration
	durationReported bool
	memTracker       *memory.Tracker

	stats *selectResultRuntimeStats
}

func (r *selectResult) Fetch(ctx context.Context) {
}

func (r *selectResult) fetchResp(ctx context.Context) error {
	for {
		r.respChkIdx = 0
		startTime := time.Now()
		resultSubset, err := r.resp.Next(ctx)
		duration := time.Since(startTime)
		r.fetchDuration += duration
		if err != nil {
			return errors.Trace(err)
		}
		if r.selectResp != nil {
			r.memConsume(-atomic.LoadInt64(&r.selectRespSize))
		}
		if resultSubset == nil {
			r.selectResp = nil
			atomic.StoreInt64(&r.selectRespSize, 0)
			if !r.durationReported {
				// final round of fetch
				// TODO: Add a label to distinguish between success or failure.
				// https://github.com/whtcorpsinc/milevadb/issues/11397
				metrics.DistALLEGROSQLQueryHistogram.WithLabelValues(r.label, r.sqlType).Observe(r.fetchDuration.Seconds())
				r.durationReported = true
			}
			return nil
		}
		r.selectResp = new(fidelpb.SelectResponse)
		err = r.selectResp.Unmarshal(resultSubset.GetData())
		if err != nil {
			return errors.Trace(err)
		}
		respSize := int64(r.selectResp.Size())
		atomic.StoreInt64(&r.selectRespSize, respSize)
		r.memConsume(respSize)
		if err := r.selectResp.Error; err != nil {
			return terror.ClassEinsteinDB.Synthesize(terror.ErrCode(err.Code), err.Msg)
		}
		sessVars := r.ctx.GetStochastikVars()
		if atomic.LoadUint32(&sessVars.Killed) == 1 {
			return errors.Trace(errQueryInterrupted)
		}
		sc := sessVars.StmtCtx
		for _, warning := range r.selectResp.Warnings {
			sc.AppendWarning(terror.ClassEinsteinDB.Synthesize(terror.ErrCode(warning.Code), warning.Msg))
		}
		r.feedback.UFIDelate(resultSubset.GetStartKey(), r.selectResp.OutputCounts)
		r.partialCount++

		hasStats, ok := resultSubset.(CopRuntimeStats)
		if ok {
			copStats := hasStats.GetCopRuntimeStats()
			if copStats != nil {
				r.uFIDelateCopRuntimeStats(ctx, copStats, resultSubset.RespTime())
				copStats.CopTime = duration
				sc.MergeInterDircDetails(&copStats.InterDircDetails, nil)
			}
		}
		if len(r.selectResp.Chunks) != 0 {
			break
		}
	}
	return nil
}
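
// Note on fetchResp above: the loop keeps pulling response subsets until it either
// reaches the end of the stream (resp.Next returns nil, which clears selectResp and
// reports the accumulated fetch duration once) or obtains a SelectResponse carrying
// at least one chunk. Memory accounting stays balanced because the size of the
// previous selectResp is released before the size of the new one is consumed.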

func (r *selectResult) Next(ctx context.Context, chk *chunk.Chunk) error {
	chk.Reset()
	if r.selectResp == nil || r.respChkIdx == len(r.selectResp.Chunks) {
		err := r.fetchResp(ctx)
		if err != nil {
			return err
		}
		if r.selectResp == nil {
			return nil
		}
	}
	// TODO(Shenghui Wu): add metrics
	switch r.selectResp.GetEncodeType() {
	case fidelpb.EncodeType_TypeDefault:
		return r.readFromDefault(ctx, chk)
	case fidelpb.EncodeType_TypeChunk:
		return r.readFromChunk(ctx, chk)
	}
	return errors.Errorf("unsupported encode type:%v", r.encodeType)
}

// NextRaw returns the next raw partial result.
func (r *selectResult) NextRaw(ctx context.Context) (data []byte, err error) {
	resultSubset, err := r.resp.Next(ctx)
	r.partialCount++
	r.feedback.Invalidate()
	if resultSubset != nil && err == nil {
		data = resultSubset.GetData()
	}
	return data, err
}

func (r *selectResult) readFromDefault(ctx context.Context, chk *chunk.Chunk) error {
	for !chk.IsFull() {
		if r.respChkIdx == len(r.selectResp.Chunks) {
			err := r.fetchResp(ctx)
			if err != nil || r.selectResp == nil {
				return err
			}
		}
		err := r.readRowsData(chk)
		if err != nil {
			return err
		}
		if len(r.selectResp.Chunks[r.respChkIdx].RowsData) == 0 {
			r.respChkIdx++
		}
	}
	return nil
}

func (r *selectResult) readFromChunk(ctx context.Context, chk *chunk.Chunk) error {
	if r.respChunkCausetDecoder == nil {
		r.respChunkCausetDecoder = chunk.NewCausetDecoder(
			chunk.NewChunkWithCapacity(r.fieldTypes, 0),
			r.fieldTypes,
		)
	}

	for !chk.IsFull() {
		if r.respChkIdx == len(r.selectResp.Chunks) {
			err := r.fetchResp(ctx)
			if err != nil || r.selectResp == nil {
				return err
			}
		}

		if r.respChunkCausetDecoder.IsFinished() {
			r.respChunkCausetDecoder.Reset(r.selectResp.Chunks[r.respChkIdx].RowsData)
		}
		// If the next chunk size is greater than required rows * 0.8, reuse the memory of the next chunk and return
		// immediately. Otherwise, splice the data into one chunk and wait for the next chunk.
		if r.respChunkCausetDecoder.RemainedRows() > int(float64(chk.RequiredRows())*0.8) {
			if chk.NumRows() > 0 {
				return nil
			}
			r.respChunkCausetDecoder.ReuseIntermChk(chk)
			r.respChkIdx++
			return nil
		}
		r.respChunkCausetDecoder.Decode(chk)
		if r.respChunkCausetDecoder.IsFinished() {
			r.respChkIdx++
		}
	}
	return nil
}
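
// A worked example of the reuse heuristic above (numbers are illustrative only):
// if the caller requires 1024 rows, the threshold is int(1024*0.8) = 819. A remote
// chunk with 900 remaining rows is handed over wholesale via ReuseIntermChk to avoid
// a copy, while one with 300 remaining rows is spliced into chk with Decode and the
// loop waits for more data.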

func (r *selectResult) uFIDelateCopRuntimeStats(ctx context.Context, copStats *einsteindb.CopRuntimeStats, respTime time.Duration) {
	callee := copStats.CalleeAddress
	if r.rootCausetID <= 0 || r.ctx.GetStochastikVars().StmtCtx.RuntimeStatsDefCausl == nil || callee == "" {
		return
	}
	if len(r.selectResp.GetInterDircutionSummaries()) != len(r.copCausetIDs) {
		logutil.Logger(ctx).Error("invalid cop task execution summaries length",
			zap.Int("expected", len(r.copCausetIDs)),
			zap.Int("received", len(r.selectResp.GetInterDircutionSummaries())))

		return
	}
	if r.stats == nil {
		id := r.rootCausetID
		r.stats = &selectResultRuntimeStats{
			backoffSleep: make(map[string]time.Duration),
			rpcStat:      einsteindb.NewRegionRequestRuntimeStats(),
		}
		r.ctx.GetStochastikVars().StmtCtx.RuntimeStatsDefCausl.RegisterStats(id, r.stats)
	}
	r.stats.mergeCopRuntimeStats(copStats, respTime)

	for i, detail := range r.selectResp.GetInterDircutionSummaries() {
		if detail != nil && detail.TimeProcessedNs != nil &&
			detail.NumProducedRows != nil && detail.NumIterations != nil {
			planID := r.copCausetIDs[i]
			r.ctx.GetStochastikVars().StmtCtx.RuntimeStatsDefCausl.
				RecordOneCopTask(planID, callee, detail)
		}
	}
}

func (r *selectResult) readRowsData(chk *chunk.Chunk) (err error) {
	rowsData := r.selectResp.Chunks[r.respChkIdx].RowsData
	causetDecoder := codec.NewCausetDecoder(chk, r.ctx.GetStochastikVars().Location())
	for !chk.IsFull() && len(rowsData) > 0 {
		for i := 0; i < r.rowLen; i++ {
			rowsData, err = causetDecoder.DecodeOne(rowsData, i, r.fieldTypes[i])
			if err != nil {
				return err
			}
		}
	}
	r.selectResp.Chunks[r.respChkIdx].RowsData = rowsData
	return nil
}
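
// Note on readRowsData above: with the default encode type, each remote chunk is a
// flat byte slice of datum-encoded values, rowLen values per row. DecodeOne appends
// column i of the current row to chk and returns the remaining bytes, so the loop
// runs until the chunk is full or rowsData is exhausted, and whatever is left is
// written back so the next call resumes where this one stopped.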

func (r *selectResult) memConsume(bytes int64) {
	if r.memTracker != nil {
		r.memTracker.Consume(bytes)
	}
}

// Close closes selectResult.
func (r *selectResult) Close() error {
	if r.feedback.Actual() >= 0 {
		metrics.DistALLEGROSQLScanKeysHistogram.Observe(float64(r.feedback.Actual()))
	}
	metrics.DistALLEGROSQLPartialCountHistogram.Observe(float64(r.partialCount))
	respSize := atomic.SwapInt64(&r.selectRespSize, 0)
	if respSize > 0 {
		r.memConsume(-respSize)
	}
	return r.resp.Close()
}

// CopRuntimeStats is an interface used to check whether the result has cop runtime stats.
type CopRuntimeStats interface {
	// GetCopRuntimeStats gets the cop runtime stats information.
	GetCopRuntimeStats() *einsteindb.CopRuntimeStats
}

type selectResultRuntimeStats struct {
	copRespTime      []time.Duration
	procKeys         []int64
	backoffSleep     map[string]time.Duration
	totalProcessTime time.Duration
	totalWaitTime    time.Duration
	rpcStat          einsteindb.RegionRequestRuntimeStats
	CoprCacheHitNum  int64
}

func (s *selectResultRuntimeStats) mergeCopRuntimeStats(copStats *einsteindb.CopRuntimeStats, respTime time.Duration) {
	s.copRespTime = append(s.copRespTime, respTime)
	s.procKeys = append(s.procKeys, copStats.ProcessedKeys)

	for k, v := range copStats.BackoffSleep {
		s.backoffSleep[k] += v
	}
	s.totalProcessTime += copStats.ProcessTime
	s.totalWaitTime += copStats.WaitTime
	s.rpcStat.Merge(copStats.RegionRequestRuntimeStats)
	if copStats.CoprCacheHit {
		s.CoprCacheHitNum++
	}
}

func (s *selectResultRuntimeStats) Clone() execdetails.RuntimeStats {
	newRs := selectResultRuntimeStats{
		copRespTime:  make([]time.Duration, 0, len(s.copRespTime)),
		procKeys:     make([]int64, 0, len(s.procKeys)),
		backoffSleep: make(map[string]time.Duration, len(s.backoffSleep)),
		rpcStat:      einsteindb.NewRegionRequestRuntimeStats(),
	}
	newRs.copRespTime = append(newRs.copRespTime, s.copRespTime...)
	newRs.procKeys = append(newRs.procKeys, s.procKeys...)
	for k, v := range s.backoffSleep {
		newRs.backoffSleep[k] += v
	}
	newRs.totalProcessTime += s.totalProcessTime
	newRs.totalWaitTime += s.totalWaitTime
	for k, v := range s.rpcStat.Stats {
		newRs.rpcStat.Stats[k] = v
	}
	return &newRs
}

func (s *selectResultRuntimeStats) Merge(rs execdetails.RuntimeStats) {
	other, ok := rs.(*selectResultRuntimeStats)
	if !ok {
		return
	}
	s.copRespTime = append(s.copRespTime, other.copRespTime...)
	s.procKeys = append(s.procKeys, other.procKeys...)

	for k, v := range other.backoffSleep {
		s.backoffSleep[k] += v
	}
	s.totalProcessTime += other.totalProcessTime
	s.totalWaitTime += other.totalWaitTime
	s.rpcStat.Merge(other.rpcStat)
	s.CoprCacheHitNum += other.CoprCacheHitNum
}

func (s *selectResultRuntimeStats) String() string {
	buf := bytes.NewBuffer(nil)
	if len(s.copRespTime) > 0 {
		size := len(s.copRespTime)
		if size == 1 {
			buf.WriteString(fmt.Sprintf("cop_task: {num: 1, max:%v, proc_keys: %v", s.copRespTime[0], s.procKeys[0]))
		} else {
			sort.Slice(s.copRespTime, func(i, j int) bool {
				return s.copRespTime[i] < s.copRespTime[j]
			})
			vMax, vMin := s.copRespTime[size-1], s.copRespTime[0]
			vP95 := s.copRespTime[size*19/20]
			sum := 0.0
			for _, t := range s.copRespTime {
				sum += float64(t)
			}
			vAvg := time.Duration(sum / float64(size))

			sort.Slice(s.procKeys, func(i, j int) bool {
				return s.procKeys[i] < s.procKeys[j]
			})
			keyMax := s.procKeys[size-1]
			keyP95 := s.procKeys[size*19/20]
			buf.WriteString(fmt.Sprintf("cop_task: {num: %v, max: %v, min: %v, avg: %v, p95: %v", size, vMax, vMin, vAvg, vP95))
			if keyMax > 0 {
				buf.WriteString(", max_proc_keys: ")
				buf.WriteString(strconv.FormatInt(keyMax, 10))
				buf.WriteString(", p95_proc_keys: ")
				buf.WriteString(strconv.FormatInt(keyP95, 10))
			}
			if s.totalProcessTime > 0 {
				buf.WriteString(", tot_proc: ")
				buf.WriteString(s.totalProcessTime.String())
				if s.totalWaitTime > 0 {
					buf.WriteString(", tot_wait: ")
					buf.WriteString(s.totalWaitTime.String())
				}
			}
		}
		copRPC := s.rpcStat.Stats[einsteindbrpc.CmdCop]
		if copRPC != nil && copRPC.Count > 0 {
			delete(s.rpcStat.Stats, einsteindbrpc.CmdCop)
			buf.WriteString(", rpc_num: ")
			buf.WriteString(strconv.FormatInt(copRPC.Count, 10))
			buf.WriteString(", rpc_time: ")
			buf.WriteString(time.Duration(copRPC.Consume).String())
		}
		buf.WriteString(fmt.Sprintf(", copr_cache_hit_ratio: %v",
			strconv.FormatFloat(float64(s.CoprCacheHitNum)/float64(len(s.copRespTime)), 'f', 2, 64)))
		buf.WriteString("}")
	}

	rpcStatsStr := s.rpcStat.String()
	if len(rpcStatsStr) > 0 {
		buf.WriteString(", ")
		buf.WriteString(rpcStatsStr)
	}

	if len(s.backoffSleep) > 0 {
		buf.WriteString(", backoff{")
		idx := 0
		for k, d := range s.backoffSleep {
			if idx > 0 {
				buf.WriteString(", ")
			}
			idx++
			buf.WriteString(fmt.Sprintf("%s: %s", k, d.String()))
		}
		buf.WriteString("}")
	}
	return buf.String()
}
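
// For reference, a String() result assembled from the format above looks roughly
// like the following (all figures are made up for illustration):
//
//	cop_task: {num: 4, max: 12ms, min: 3ms, avg: 7ms, p95: 12ms, max_proc_keys: 2048,
//	p95_proc_keys: 2048, tot_proc: 20ms, tot_wait: 4ms, rpc_num: 4, rpc_time: 18ms,
//	copr_cache_hit_ratio: 0.25}, backoff{regionMiss: 2ms}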

// Tp implements the RuntimeStats interface.
func (s *selectResultRuntimeStats) Tp() int {
	return execdetails.TpSelectResultRuntimeStats
}