github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/interlock/infoschema_reader.go

     1  // Copyright 2020 WHTCORPS INC, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package interlock
    15  
    16  import (
    17  	"context"
    18  	"encoding/json"
    19  	"fmt"
    20  	"io/ioutil"
    21  	"net/http"
    22  	"sort"
    23  	"strconv"
    24  	"strings"
    25  	"sync"
    26  	"time"
    27  
    28  	"github.com/cznic/mathutil"
    29  	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
    30  	"github.com/whtcorpsinc/BerolinaSQL/charset"
    31  	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
    32  	"github.com/whtcorpsinc/BerolinaSQL/terror"
    33  	"github.com/whtcorpsinc/errors"
    34  	"github.com/whtcorpsinc/milevadb/causet"
    35  	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
    36  	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb"
    37  	"github.com/whtcorpsinc/milevadb/causetstore/helper"
    38  	"github.com/whtcorpsinc/milevadb/petri"
    39  	"github.com/whtcorpsinc/milevadb/petri/infosync"
    40  	"github.com/whtcorpsinc/milevadb/privilege"
    41  	"github.com/whtcorpsinc/milevadb/schemareplicant"
    42  	"github.com/whtcorpsinc/milevadb/soliton"
    43  	"github.com/whtcorpsinc/milevadb/soliton/FIDelapi"
    44  	"github.com/whtcorpsinc/milevadb/soliton/chunk"
    45  	"github.com/whtcorpsinc/milevadb/soliton/defCauslate"
    46  	"github.com/whtcorpsinc/milevadb/soliton/set"
    47  	"github.com/whtcorpsinc/milevadb/soliton/sqlexec"
    48  	"github.com/whtcorpsinc/milevadb/soliton/stmtsummary"
    49  	"github.com/whtcorpsinc/milevadb/soliton/stringutil"
    50  	"github.com/whtcorpsinc/milevadb/spacetime/autoid"
    51  	"github.com/whtcorpsinc/milevadb/statistics"
    52  	"github.com/whtcorpsinc/milevadb/stochastikctx"
    53  	"github.com/whtcorpsinc/milevadb/stochastikctx/variable"
    54  	"github.com/whtcorpsinc/milevadb/types"
    55  	binaryJson "github.com/whtcorpsinc/milevadb/types/json"
    56  	"go.etcd.io/etcd/clientv3"
    57  )
    58  
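        // memblockRetriever materializes the full result set of a schemareplicant memory
        // causet on the first call to retrieve, then hands the cached rows back to the
        // caller in fixed-size batches until every event has been returned.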
    59  type memblockRetriever struct {
    60  	dummyCloser
    61  	causet      *perceptron.BlockInfo
    62  	defCausumns []*perceptron.DeferredCausetInfo
    63  	rows        [][]types.Causet
    64  	rowIdx      int
    65  	retrieved   bool
    66  	initialized bool
    67  }
    68  
    69  // retrieve implements the schemareplicantRetriever interface
    70  func (e *memblockRetriever) retrieve(ctx context.Context, sctx stochastikctx.Context) ([][]types.Causet, error) {
    71  	if e.retrieved {
    72  		return nil, nil
    73  	}
    74  
    75  	// Cache the full result rows in the retriever the first time retrieve is called.
    76  	if !e.initialized {
    77  		is := schemareplicant.GetSchemaReplicant(sctx)
    78  		dbs := is.AllSchemas()
    79  		sort.Sort(schemareplicant.SchemasSorter(dbs))
    80  		var err error
    81  		switch e.causet.Name.O {
    82  		case schemareplicant.BlockSchemata:
    83  			e.setDataFromSchemata(sctx, dbs)
    84  		case schemareplicant.BlockStatistics:
    85  			e.setDataForStatistics(sctx, dbs)
    86  		case schemareplicant.BlockBlocks:
    87  			err = e.setDataFromBlocks(sctx, dbs)
    88  		case schemareplicant.BlockSequences:
    89  			e.setDataFromSequences(sctx, dbs)
    90  		case schemareplicant.BlockPartitions:
    91  			err = e.setDataFromPartitions(sctx, dbs)
    92  		case schemareplicant.BlockClusterInfo:
    93  			err = e.dataForMilevaDBClusterInfo(sctx)
    94  		case schemareplicant.BlockAnalyzeStatus:
    95  			e.setDataForAnalyzeStatus(sctx)
    96  		case schemareplicant.BlockMilevaDBIndexes:
    97  			e.setDataFromIndexes(sctx, dbs)
    98  		case schemareplicant.BlockViews:
    99  			e.setDataFromViews(sctx, dbs)
   100  		case schemareplicant.BlockEngines:
   101  			e.setDataFromEngines()
   102  		case schemareplicant.BlockCharacterSets:
   103  			e.setDataFromCharacterSets()
   104  		case schemareplicant.BlockDefCauslations:
   105  			e.setDataFromDefCauslations()
   106  		case schemareplicant.BlockKeyDeferredCauset:
   107  			e.setDataFromKeyDeferredCausetUsage(sctx, dbs)
   108  		case schemareplicant.BlockMetricBlocks:
   109  			e.setDataForMetricBlocks(sctx)
   110  		case schemareplicant.BlockProfiling:
   111  			e.setDataForPseudoProfiling(sctx)
   112  		case schemareplicant.BlockDefCauslationCharacterSetApplicability:
   113  			e.dataForDefCauslationCharacterSetApplicability()
   114  		case schemareplicant.BlockProcesslist:
   115  			e.setDataForProcessList(sctx)
   116  		case schemareplicant.ClusterBlockProcesslist:
   117  			err = e.setDataForClusterProcessList(sctx)
   118  		case schemareplicant.BlockUserPrivileges:
   119  			e.setDataFromUserPrivileges(sctx)
   120  		case schemareplicant.BlockEinsteinDBRegionStatus:
   121  			err = e.setDataForEinsteinDBRegionStatus(sctx)
   122  		case schemareplicant.BlockEinsteinDBRegionPeers:
   123  			err = e.setDataForEinsteinDBRegionPeers(sctx)
   124  		case schemareplicant.BlockMilevaDBHotRegions:
   125  			err = e.setDataForMilevaDBHotRegions(sctx)
   126  		case schemareplicant.BlockConstraints:
   127  			e.setDataFromBlockConstraints(sctx, dbs)
   128  		case schemareplicant.BlockStochastikVar:
   129  			err = e.setDataFromStochastikVar(sctx)
   130  		case schemareplicant.BlockMilevaDBServersInfo:
   131  			err = e.setDataForServersInfo()
   132  		case schemareplicant.BlockTiFlashReplica:
   133  			e.dataForBlockTiFlashReplica(sctx, dbs)
   134  		case schemareplicant.BlockEinsteinDBStoreStatus:
   135  			err = e.dataForEinsteinDBStoreStatus(sctx)
   136  		case schemareplicant.BlockStatementsSummary,
   137  			schemareplicant.BlockStatementsSummaryHistory,
   138  			schemareplicant.ClusterBlockStatementsSummary,
   139  			schemareplicant.ClusterBlockStatementsSummaryHistory:
   140  			err = e.setDataForStatementsSummary(sctx, e.causet.Name.O)
   141  		}
   142  		if err != nil {
   143  			return nil, err
   144  		}
   145  		e.initialized = true
   146  	}
   147  
   148  	// Return the cached rows in batches of at most maxCount rows per call.
   149  	maxCount := 1024
   150  	retCount := maxCount
   151  	if e.rowIdx+maxCount > len(e.rows) {
   152  		retCount = len(e.rows) - e.rowIdx
   153  		e.retrieved = true
   154  	}
   155  	ret := make([][]types.Causet, retCount)
   156  	for i := e.rowIdx; i < e.rowIdx+retCount; i++ {
   157  		ret[i-e.rowIdx] = e.rows[i]
   158  	}
   159  	e.rowIdx += retCount
   160  	return adjustDeferredCausets(ret, e.defCausumns, e.causet), nil
   161  }
   162  
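        // getEventCountAllBlock loads the event count recorded for every causet in
        // allegrosql.stats_spacetime, keyed by causet ID.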
   163  func getEventCountAllBlock(ctx stochastikctx.Context) (map[int64]uint64, error) {
   164  	rows, _, err := ctx.(sqlexec.RestrictedALLEGROSQLInterlockingDirectorate).InterDircRestrictedALLEGROSQL("select block_id, count from allegrosql.stats_spacetime")
   165  	if err != nil {
   166  		return nil, err
   167  	}
   168  	rowCountMap := make(map[int64]uint64, len(rows))
   169  	for _, event := range rows {
   170  		blockID := event.GetInt64(0)
   171  		rowCnt := event.GetUint64(1)
   172  		rowCountMap[blockID] = rowCnt
   173  	}
   174  	return rowCountMap, nil
   175  }
   176  
   177  type blockHistID struct {
   178  	blockID int64
   179  	histID  int64
   180  }
   181  
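        // getDefCausLengthAllBlocks loads the total stored size of every non-index defCausumn
        // from allegrosql.stats_histograms, keyed by (causet ID, histogram ID); negative
        // sizes are clamped to zero.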
   182  func getDefCausLengthAllBlocks(ctx stochastikctx.Context) (map[blockHistID]uint64, error) {
   183  	rows, _, err := ctx.(sqlexec.RestrictedALLEGROSQLInterlockingDirectorate).InterDircRestrictedALLEGROSQL("select block_id, hist_id, tot_defCaus_size from allegrosql.stats_histograms where is_index = 0")
   184  	if err != nil {
   185  		return nil, err
   186  	}
   187  	defCausLengthMap := make(map[blockHistID]uint64, len(rows))
   188  	for _, event := range rows {
   189  		blockID := event.GetInt64(0)
   190  		histID := event.GetInt64(1)
   191  		totalSize := event.GetInt64(2)
   192  		if totalSize < 0 {
   193  			totalSize = 0
   194  		}
   195  		defCausLengthMap[blockHistID{blockID: blockID, histID: histID}] = uint64(totalSize)
   196  	}
   197  	return defCausLengthMap, nil
   198  }
   199  
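        // getDataAndIndexLength estimates the data and index size of one physical causet:
        // fixed-width defCausumns contribute rowCount * storage length, variable-width
        // defCausumns use the size recorded in the statistics, and each index defCausumn adds
        // either its full defCausumn size or rowCount * prefix length.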
   200  func getDataAndIndexLength(info *perceptron.BlockInfo, physicalID int64, rowCount uint64, defCausumnLengthMap map[blockHistID]uint64) (uint64, uint64) {
   201  	defCausumnLength := make(map[string]uint64, len(info.DeferredCausets))
   202  	for _, defCaus := range info.DeferredCausets {
   203  		if defCaus.State != perceptron.StatePublic {
   204  			continue
   205  		}
   206  		length := defCaus.FieldType.StorageLength()
   207  		if length != types.VarStorageLen {
   208  			defCausumnLength[defCaus.Name.L] = rowCount * uint64(length)
   209  		} else {
   210  			length := defCausumnLengthMap[blockHistID{blockID: physicalID, histID: defCaus.ID}]
   211  			defCausumnLength[defCaus.Name.L] = length
   212  		}
   213  	}
   214  	dataLength, indexLength := uint64(0), uint64(0)
   215  	for _, length := range defCausumnLength {
   216  		dataLength += length
   217  	}
   218  	for _, idx := range info.Indices {
   219  		if idx.State != perceptron.StatePublic {
   220  			continue
   221  		}
   222  		for _, defCaus := range idx.DeferredCausets {
   223  			if defCaus.Length == types.UnspecifiedLength {
   224  				indexLength += defCausumnLength[defCaus.Name.L]
   225  			} else {
   226  				indexLength += rowCount * uint64(defCaus.Length)
   227  			}
   228  		}
   229  	}
   230  	return dataLength, indexLength
   231  }
   232  
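        // statsCache keeps the event counts and defCausumn lengths loaded from the statistics
        // blocks and refreshes them lazily once BlockStatsCacheExpiry has passed, so that
        // repeated schemareplicant queries do not hit the statistics blocks every time.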
   233  type statsCache struct {
   234  	mu            sync.RWMutex
   235  	modifyTime    time.Time
   236  	blockEvents   map[int64]uint64
   237  	defCausLength map[blockHistID]uint64
   238  }
   239  
   240  var blockStatsCache = &statsCache{}
   241  
   242  // BlockStatsCacheExpiry is the expiry time for causet stats cache.
   243  var BlockStatsCacheExpiry = 3 * time.Second
   244  
   245  func (c *statsCache) get(ctx stochastikctx.Context) (map[int64]uint64, map[blockHistID]uint64, error) {
   246  	c.mu.RLock()
   247  	if time.Since(c.modifyTime) < BlockStatsCacheExpiry {
   248  		blockEvents, defCausLength := c.blockEvents, c.defCausLength
   249  		c.mu.RUnlock()
   250  		return blockEvents, defCausLength, nil
   251  	}
   252  	c.mu.RUnlock()
   253  
   254  	c.mu.Lock()
   255  	defer c.mu.Unlock()
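        	// Re-check the expiry under the write lock (double-checked locking): another
        	// stochastik may have refreshed the cache while this one waited for the lock.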
   256  	if time.Since(c.modifyTime) < BlockStatsCacheExpiry {
   257  		return c.blockEvents, c.defCausLength, nil
   258  	}
   259  	blockEvents, err := getEventCountAllBlock(ctx)
   260  	if err != nil {
   261  		return nil, nil, err
   262  	}
   263  	defCausLength, err := getDefCausLengthAllBlocks(ctx)
   264  	if err != nil {
   265  		return nil, nil, err
   266  	}
   267  
   268  	c.blockEvents = blockEvents
   269  	c.defCausLength = defCausLength
   270  	c.modifyTime = time.Now()
   271  	return blockEvents, defCausLength, nil
   272  }
   273  
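        // getAutoIncrementID returns the next value the auto-increment defCausumn of the
        // given causet would produce, i.e. the current slab predictor base plus one.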
   274  func getAutoIncrementID(ctx stochastikctx.Context, schemaReplicant *perceptron.DBInfo, tblInfo *perceptron.BlockInfo) (int64, error) {
   275  	is := schemareplicant.GetSchemaReplicant(ctx)
   276  	tbl, err := is.BlockByName(schemaReplicant.Name, tblInfo.Name)
   277  	if err != nil {
   278  		return 0, err
   279  	}
   280  	return tbl.SlabPredictors(ctx).Get(autoid.EventIDAllocType).Base() + 1, nil
   281  }
   282  
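        // setDataFromSchemata builds the SCHEMATA rows: one event per database the current
        // user is allowed to see, carrying its default character set and defCauslation.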
   283  func (e *memblockRetriever) setDataFromSchemata(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) {
   284  	checker := privilege.GetPrivilegeManager(ctx)
   285  	rows := make([][]types.Causet, 0, len(schemas))
   286  
   287  	for _, schemaReplicant := range schemas {
   288  
   289  		charset := allegrosql.DefaultCharset
   290  		defCauslation := allegrosql.DefaultDefCauslationName
   291  
   292  		if len(schemaReplicant.Charset) > 0 {
   293  			charset = schemaReplicant.Charset // Overwrite default
   294  		}
   295  
   296  		if len(schemaReplicant.DefCauslate) > 0 {
   297  			defCauslation = schemaReplicant.DefCauslate // Overwrite default
   298  		}
   299  
   300  		if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, "", "", allegrosql.AllPrivMask) {
   301  			continue
   302  		}
   303  		record := types.MakeCausets(
   304  			schemareplicant.CatalogVal, // CATALOG_NAME
   305  			schemaReplicant.Name.O,     // SCHEMA_NAME
   306  			charset,                    // DEFAULT_CHARACTER_SET_NAME
   307  			defCauslation,              // DEFAULT_COLLATION_NAME
   308  			nil,
   309  		)
   310  		rows = append(rows, record)
   311  	}
   312  	e.rows = rows
   313  }
   314  
   315  func (e *memblockRetriever) setDataForStatistics(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) {
   316  	checker := privilege.GetPrivilegeManager(ctx)
   317  	for _, schemaReplicant := range schemas {
   318  		for _, causet := range schemaReplicant.Blocks {
   319  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, causet.Name.L, "", allegrosql.AllPrivMask) {
   320  				continue
   321  			}
   322  			e.setDataForStatisticsInBlock(schemaReplicant, causet)
   323  		}
   324  	}
   325  }
   326  
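        // setDataForStatisticsInBlock emits the STATISTICS rows of one causet: a synthetic
        // PRIMARY entry when the primary key is the integer handle, plus one event per
        // defCausumn of every index.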
   327  func (e *memblockRetriever) setDataForStatisticsInBlock(schemaReplicant *perceptron.DBInfo, causet *perceptron.BlockInfo) {
   328  	var rows [][]types.Causet
   329  	if causet.PKIsHandle {
   330  		for _, defCaus := range causet.DeferredCausets {
   331  			if allegrosql.HasPriKeyFlag(defCaus.Flag) {
   332  				record := types.MakeCausets(
   333  					schemareplicant.CatalogVal, // TABLE_CATALOG
   334  					schemaReplicant.Name.O,     // TABLE_SCHEMA
   335  					causet.Name.O,              // TABLE_NAME
   336  					"0",                        // NON_UNIQUE
   337  					schemaReplicant.Name.O,     // INDEX_SCHEMA
   338  					"PRIMARY",                  // INDEX_NAME
   339  					1,                          // SEQ_IN_INDEX
   340  					defCaus.Name.O,             // COLUMN_NAME
   341  					"A",                        // COLLATION
   342  					0,                          // CARDINALITY
   343  					nil,                        // SUB_PART
   344  					nil,                        // PACKED
   345  					"",                         // NULLABLE
   346  					"BTREE",                    // INDEX_TYPE
   347  					"",                         // COMMENT
   348  					"",                         // INDEX_COMMENT
   349  					"YES",                      // IS_VISIBLE
   350  					nil,                        // Expression
   351  				)
   352  				rows = append(rows, record)
   353  			}
   354  		}
   355  	}
   356  	nameToDefCaus := make(map[string]*perceptron.DeferredCausetInfo, len(causet.DeferredCausets))
   357  	for _, c := range causet.DeferredCausets {
   358  		nameToDefCaus[c.Name.L] = c
   359  	}
   360  	for _, index := range causet.Indices {
   361  		nonUnique := "1"
   362  		if index.Unique {
   363  			nonUnique = "0"
   364  		}
   365  		for i, key := range index.DeferredCausets {
   366  			defCaus := nameToDefCaus[key.Name.L]
   367  			nullable := "YES"
   368  			if allegrosql.HasNotNullFlag(defCaus.Flag) {
   369  				nullable = ""
   370  			}
   371  
   372  			visible := "YES"
   373  			if index.Invisible {
   374  				visible = "NO"
   375  			}
   376  
   377  			defCausName := defCaus.Name.O
   378  			var memex interface{}
   379  			memex = nil
   380  			tblDefCaus := causet.DeferredCausets[defCaus.Offset]
   381  			if tblDefCaus.Hidden {
   382  				defCausName = "NULL"
   383  				memex = fmt.Sprintf("(%s)", tblDefCaus.GeneratedExprString)
   384  			}
   385  
   386  			record := types.MakeCausets(
   387  				schemareplicant.CatalogVal, // TABLE_CATALOG
   388  				schemaReplicant.Name.O,     // TABLE_SCHEMA
   389  				causet.Name.O,              // TABLE_NAME
   390  				nonUnique,                  // NON_UNIQUE
   391  				schemaReplicant.Name.O,     // INDEX_SCHEMA
   392  				index.Name.O,               // INDEX_NAME
   393  				i+1,                        // SEQ_IN_INDEX
   394  				defCausName,                // COLUMN_NAME
   395  				"A",                        // COLLATION
   396  				0,                          // CARDINALITY
   397  				nil,                        // SUB_PART
   398  				nil,                        // PACKED
   399  				nullable,                   // NULLABLE
   400  				"BTREE",                    // INDEX_TYPE
   401  				"",                         // COMMENT
   402  				"",                         // INDEX_COMMENT
   403  				visible,                    // IS_VISIBLE
   404  				memex,                      // Expression
   405  			)
   406  			rows = append(rows, record)
   407  		}
   408  	}
   409  	e.rows = append(e.rows, rows...)
   410  }
   411  
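        // setDataFromBlocks builds the TABLES rows. Base blocks report size estimates from
        // the stats cache, the auto-increment base and the primary-key clustering type,
        // while views report a fixed "VIEW" shape; sequences are skipped here.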
   412  func (e *memblockRetriever) setDataFromBlocks(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) error {
   413  	blockEventsMap, defCausLengthMap, err := blockStatsCache.get(ctx)
   414  	if err != nil {
   415  		return err
   416  	}
   417  
   418  	checker := privilege.GetPrivilegeManager(ctx)
   419  
   420  	var rows [][]types.Causet
   421  	createTimeTp := allegrosql.TypeDatetime
   422  	for _, schemaReplicant := range schemas {
   423  		for _, causet := range schemaReplicant.Blocks {
   424  			defCauslation := causet.DefCauslate
   425  			if defCauslation == "" {
   426  				defCauslation = allegrosql.DefaultDefCauslationName
   427  			}
   428  			createTime := types.NewTime(types.FromGoTime(causet.GetUFIDelateTime()), createTimeTp, types.DefaultFsp)
   429  
   430  			createOptions := ""
   431  
   432  			if causet.IsSequence() {
   433  				continue
   434  			}
   435  
   436  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, causet.Name.L, "", allegrosql.AllPrivMask) {
   437  				continue
   438  			}
   439  			pkType := "NON-CLUSTERED"
   440  			if !causet.IsView() {
   441  				if causet.GetPartitionInfo() != nil {
   442  					createOptions = "partitioned"
   443  				}
   444  				var autoIncID interface{}
   445  				hasAutoIncID, _ := schemareplicant.HasAutoIncrementDeferredCauset(causet)
   446  				if hasAutoIncID {
   447  					autoIncID, err = getAutoIncrementID(ctx, schemaReplicant, causet)
   448  					if err != nil {
   449  						return err
   450  					}
   451  				}
   452  
   453  				var rowCount, dataLength, indexLength uint64
   454  				if causet.GetPartitionInfo() == nil {
   455  					rowCount = blockEventsMap[causet.ID]
   456  					dataLength, indexLength = getDataAndIndexLength(causet, causet.ID, rowCount, defCausLengthMap)
   457  				} else {
   458  					for _, pi := range causet.GetPartitionInfo().Definitions {
   459  						rowCount += blockEventsMap[pi.ID]
   460  						parDataLen, parIndexLen := getDataAndIndexLength(causet, pi.ID, blockEventsMap[pi.ID], defCausLengthMap)
   461  						dataLength += parDataLen
   462  						indexLength += parIndexLen
   463  					}
   464  				}
   465  				avgEventLength := uint64(0)
   466  				if rowCount != 0 {
   467  					avgEventLength = dataLength / rowCount
   468  				}
   469  				blockType := "BASE TABLE"
   470  				if soliton.IsSystemView(schemaReplicant.Name.L) {
   471  					blockType = "SYSTEM VIEW"
   472  				}
   473  				if causet.PKIsHandle {
   474  					pkType = "INT CLUSTERED"
   475  				} else if causet.IsCommonHandle {
   476  					pkType = "COMMON CLUSTERED"
   477  				}
   478  				shardingInfo := schemareplicant.GetShardingInfo(schemaReplicant, causet)
   479  				record := types.MakeCausets(
   480  					schemareplicant.CatalogVal, // TABLE_CATALOG
   481  					schemaReplicant.Name.O,     // TABLE_SCHEMA
   482  					causet.Name.O,              // TABLE_NAME
   483  					blockType,                  // TABLE_TYPE
   484  					"InnoDB",                   // ENGINE
   485  					uint64(10),                 // VERSION
   486  					"Compact",                  // ROW_FORMAT
   487  					rowCount,                   // TABLE_ROWS
   488  					avgEventLength,             // AVG_ROW_LENGTH
   489  					dataLength,                 // DATA_LENGTH
   490  					uint64(0),                  // MAX_DATA_LENGTH
   491  					indexLength,                // INDEX_LENGTH
   492  					uint64(0),                  // DATA_FREE
   493  					autoIncID,                  // AUTO_INCREMENT
   494  					createTime,                 // CREATE_TIME
   495  					nil,                        // UFIDelATE_TIME
   496  					nil,                        // CHECK_TIME
   497  					defCauslation,              // TABLE_COLLATION
   498  					nil,                        // CHECKSUM
   499  					createOptions,              // CREATE_OPTIONS
   500  					causet.Comment,             // TABLE_COMMENT
   501  					causet.ID,                  // MilevaDB_TABLE_ID
   502  					shardingInfo,               // MilevaDB_ROW_ID_SHARDING_INFO
   503  					pkType,                     // MilevaDB_PK_TYPE
   504  				)
   505  				rows = append(rows, record)
   506  			} else {
   507  				record := types.MakeCausets(
   508  					schemareplicant.CatalogVal, // TABLE_CATALOG
   509  					schemaReplicant.Name.O,     // TABLE_SCHEMA
   510  					causet.Name.O,              // TABLE_NAME
   511  					"VIEW",                     // TABLE_TYPE
   512  					nil,                        // ENGINE
   513  					nil,                        // VERSION
   514  					nil,                        // ROW_FORMAT
   515  					nil,                        // TABLE_ROWS
   516  					nil,                        // AVG_ROW_LENGTH
   517  					nil,                        // DATA_LENGTH
   518  					nil,                        // MAX_DATA_LENGTH
   519  					nil,                        // INDEX_LENGTH
   520  					nil,                        // DATA_FREE
   521  					nil,                        // AUTO_INCREMENT
   522  					createTime,                 // CREATE_TIME
   523  					nil,                        // UFIDelATE_TIME
   524  					nil,                        // CHECK_TIME
   525  					nil,                        // TABLE_COLLATION
   526  					nil,                        // CHECKSUM
   527  					nil,                        // CREATE_OPTIONS
   528  					"VIEW",                     // TABLE_COMMENT
   529  					causet.ID,                  // MilevaDB_TABLE_ID
   530  					nil,                        // MilevaDB_ROW_ID_SHARDING_INFO
   531  					pkType,                     // MilevaDB_PK_TYPE
   532  				)
   533  				rows = append(rows, record)
   534  			}
   535  		}
   536  	}
   537  	e.rows = rows
   538  	return nil
   539  }
   540  
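        // setDataForDeferredCausets fills roughly one batch (about 1024 rows) of COLUMNS
        // data per call, returning once the batch is full; e.dbsIdx and e.tblIdx record
        // where the previous call stopped so the next call resumes from that position.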
   541  func (e *hugeMemBlockRetriever) setDataForDeferredCausets(ctx stochastikctx.Context) error {
   542  	checker := privilege.GetPrivilegeManager(ctx)
   543  	e.rows = e.rows[:0]
   544  	batch := 1024
   545  	for ; e.dbsIdx < len(e.dbs); e.dbsIdx++ {
   546  		schemaReplicant := e.dbs[e.dbsIdx]
   547  		for e.tblIdx < len(schemaReplicant.Blocks) {
   548  			causet := schemaReplicant.Blocks[e.tblIdx]
   549  			e.tblIdx++
   550  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, causet.Name.L, "", allegrosql.AllPrivMask) {
   551  				continue
   552  			}
   553  
   554  			e.dataForDeferredCausetsInBlock(schemaReplicant, causet)
   555  			if len(e.rows) >= batch {
   556  				return nil
   557  			}
   558  		}
   559  		e.tblIdx = 0
   560  	}
   561  	return nil
   562  }
   563  
   564  func (e *hugeMemBlockRetriever) dataForDeferredCausetsInBlock(schemaReplicant *perceptron.DBInfo, tbl *perceptron.BlockInfo) {
   565  	for i, defCaus := range tbl.DeferredCausets {
   566  		if defCaus.Hidden {
   567  			continue
   568  		}
   569  		var charMaxLen, charOctLen, numericPrecision, numericScale, datetimePrecision interface{}
   570  		defCausLen, decimal := defCaus.Flen, defCaus.Decimal
   571  		defaultFlen, defaultDecimal := allegrosql.GetDefaultFieldLengthAndDecimal(defCaus.Tp)
   572  		if decimal == types.UnspecifiedLength {
   573  			decimal = defaultDecimal
   574  		}
   575  		if defCausLen == types.UnspecifiedLength {
   576  			defCausLen = defaultFlen
   577  		}
   578  		if defCaus.Tp == allegrosql.TypeSet {
   579  			// Example: in MyALLEGROSQL, set('a','bc','def','ghij') has length 13, because
   580  			// len('a')+len('bc')+len('def')+len('ghij') plus the three separating commas equals 13.
   581  			// Reference link: https://bugs.allegrosql.com/bug.php?id=22613
   582  			defCausLen = 0
   583  			for _, ele := range defCaus.Elems {
   584  				defCausLen += len(ele)
   585  			}
   586  			if len(defCaus.Elems) != 0 {
   587  				defCausLen += (len(defCaus.Elems) - 1)
   588  			}
   589  			charMaxLen = defCausLen
   590  			charOctLen = defCausLen
   591  		} else if defCaus.Tp == allegrosql.TypeEnum {
   592  			// Example: in MyALLEGROSQL, enum('a', 'ab', 'cdef') has length 4, because
   593  			// the longest string in the enum is 'cdef'.
   594  			// Reference link: https://bugs.allegrosql.com/bug.php?id=22613
   595  			defCausLen = 0
   596  			for _, ele := range defCaus.Elems {
   597  				if len(ele) > defCausLen {
   598  					defCausLen = len(ele)
   599  				}
   600  			}
   601  			charMaxLen = defCausLen
   602  			charOctLen = defCausLen
   603  		} else if types.IsString(defCaus.Tp) {
   604  			charMaxLen = defCausLen
   605  			charOctLen = defCausLen
   606  		} else if types.IsTypeFractionable(defCaus.Tp) {
   607  			datetimePrecision = decimal
   608  		} else if types.IsTypeNumeric(defCaus.Tp) {
   609  			numericPrecision = defCausLen
   610  			if defCaus.Tp != allegrosql.TypeFloat && defCaus.Tp != allegrosql.TypeDouble {
   611  				numericScale = decimal
   612  			} else if decimal != -1 {
   613  				numericScale = decimal
   614  			}
   615  		}
   616  		defCausumnType := defCaus.FieldType.SchemaReplicantStr()
   617  		defCausumnDesc := causet.NewDefCausDesc(causet.ToDeferredCauset(defCaus))
   618  		var defCausumnDefault interface{}
   619  		if defCausumnDesc.DefaultValue != nil {
   620  			defCausumnDefault = fmt.Sprintf("%v", defCausumnDesc.DefaultValue)
   621  		}
   622  		record := types.MakeCausets(
   623  			schemareplicant.CatalogVal, // TABLE_CATALOG
   624  			schemaReplicant.Name.O,     // TABLE_SCHEMA
   625  			tbl.Name.O,                 // TABLE_NAME
   626  			defCaus.Name.O,             // COLUMN_NAME
   627  			i+1,                        // ORDINAL_POSITION
   628  			defCausumnDefault,          // COLUMN_DEFAULT
   629  			defCausumnDesc.Null,        // IS_NULLABLE
   630  			types.TypeToStr(defCaus.Tp, defCaus.Charset), // DATA_TYPE
   631  			charMaxLen,                           // CHARACTER_MAXIMUM_LENGTH
   632  			charOctLen,                           // CHARACTER_OCTET_LENGTH
   633  			numericPrecision,                     // NUMERIC_PRECISION
   634  			numericScale,                         // NUMERIC_SCALE
   635  			datetimePrecision,                    // DATETIME_PRECISION
   636  			defCausumnDesc.Charset,               // CHARACTER_SET_NAME
   637  			defCausumnDesc.DefCauslation,         // COLLATION_NAME
   638  			defCausumnType,                       // COLUMN_TYPE
   639  			defCausumnDesc.Key,                   // COLUMN_KEY
   640  			defCausumnDesc.Extra,                 // EXTRA
   641  			"select,insert,uFIDelate,references", // PRIVILEGES
   642  			defCausumnDesc.Comment,               // COLUMN_COMMENT
   643  			defCaus.GeneratedExprString,          // GENERATION_EXPRESSION
   644  		)
   645  		e.rows = append(e.rows, record)
   646  	}
   647  }
   648  
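        // setDataFromPartitions builds the PARTITIONS rows: unpartitioned blocks get a
        // single event with NULL partition defCausumns, while partitioned blocks get one
        // event per partition definition with its method, memex and description.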
   649  func (e *memblockRetriever) setDataFromPartitions(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) error {
   650  	blockEventsMap, defCausLengthMap, err := blockStatsCache.get(ctx)
   651  	if err != nil {
   652  		return err
   653  	}
   654  	checker := privilege.GetPrivilegeManager(ctx)
   655  	var rows [][]types.Causet
   656  	createTimeTp := allegrosql.TypeDatetime
   657  	for _, schemaReplicant := range schemas {
   658  		for _, causet := range schemaReplicant.Blocks {
   659  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, causet.Name.L, "", allegrosql.SelectPriv) {
   660  				continue
   661  			}
   662  			createTime := types.NewTime(types.FromGoTime(causet.GetUFIDelateTime()), createTimeTp, types.DefaultFsp)
   663  
   664  			var rowCount, dataLength, indexLength uint64
   665  			if causet.GetPartitionInfo() == nil {
   666  				rowCount = blockEventsMap[causet.ID]
   667  				dataLength, indexLength = getDataAndIndexLength(causet, causet.ID, rowCount, defCausLengthMap)
   668  				avgEventLength := uint64(0)
   669  				if rowCount != 0 {
   670  					avgEventLength = dataLength / rowCount
   671  				}
   672  				record := types.MakeCausets(
   673  					schemareplicant.CatalogVal, // TABLE_CATALOG
   674  					schemaReplicant.Name.O,     // TABLE_SCHEMA
   675  					causet.Name.O,              // TABLE_NAME
   676  					nil,                        // PARTITION_NAME
   677  					nil,                        // SUBPARTITION_NAME
   678  					nil,                        // PARTITION_ORDINAL_POSITION
   679  					nil,                        // SUBPARTITION_ORDINAL_POSITION
   680  					nil,                        // PARTITION_METHOD
   681  					nil,                        // SUBPARTITION_METHOD
   682  					nil,                        // PARTITION_EXPRESSION
   683  					nil,                        // SUBPARTITION_EXPRESSION
   684  					nil,                        // PARTITION_DESCRIPTION
   685  					rowCount,                   // TABLE_ROWS
   686  					avgEventLength,             // AVG_ROW_LENGTH
   687  					dataLength,                 // DATA_LENGTH
   688  					nil,                        // MAX_DATA_LENGTH
   689  					indexLength,                // INDEX_LENGTH
   690  					nil,                        // DATA_FREE
   691  					createTime,                 // CREATE_TIME
   692  					nil,                        // UFIDelATE_TIME
   693  					nil,                        // CHECK_TIME
   694  					nil,                        // CHECKSUM
   695  					nil,                        // PARTITION_COMMENT
   696  					nil,                        // NODEGROUP
   697  					nil,                        // TABLESPACE_NAME
   698  				)
   699  				rows = append(rows, record)
   700  			} else {
   701  				for i, pi := range causet.GetPartitionInfo().Definitions {
   702  					rowCount = blockEventsMap[pi.ID]
   703  					dataLength, indexLength = getDataAndIndexLength(causet, pi.ID, blockEventsMap[pi.ID], defCausLengthMap)
   704  
   705  					avgEventLength := uint64(0)
   706  					if rowCount != 0 {
   707  						avgEventLength = dataLength / rowCount
   708  					}
   709  
   710  					var partitionDesc string
   711  					if causet.Partition.Type == perceptron.PartitionTypeRange {
   712  						partitionDesc = pi.LessThan[0]
   713  					}
   714  
   715  					partitionMethod := causet.Partition.Type.String()
   716  					partitionExpr := causet.Partition.Expr
   717  					if causet.Partition.Type == perceptron.PartitionTypeRange && len(causet.Partition.DeferredCausets) > 0 {
   718  						partitionMethod = "RANGE COLUMNS"
   719  						partitionExpr = causet.Partition.DeferredCausets[0].String()
   720  					}
   721  
   722  					record := types.MakeCausets(
   723  						schemareplicant.CatalogVal, // TABLE_CATALOG
   724  						schemaReplicant.Name.O,     // TABLE_SCHEMA
   725  						causet.Name.O,              // TABLE_NAME
   726  						pi.Name.O,                  // PARTITION_NAME
   727  						nil,                        // SUBPARTITION_NAME
   728  						i+1,                        // PARTITION_ORDINAL_POSITION
   729  						nil,                        // SUBPARTITION_ORDINAL_POSITION
   730  						partitionMethod,            // PARTITION_METHOD
   731  						nil,                        // SUBPARTITION_METHOD
   732  						partitionExpr,              // PARTITION_EXPRESSION
   733  						nil,                        // SUBPARTITION_EXPRESSION
   734  						partitionDesc,              // PARTITION_DESCRIPTION
   735  						rowCount,                   // TABLE_ROWS
   736  						avgEventLength,             // AVG_ROW_LENGTH
   737  						dataLength,                 // DATA_LENGTH
   738  						uint64(0),                  // MAX_DATA_LENGTH
   739  						indexLength,                // INDEX_LENGTH
   740  						uint64(0),                  // DATA_FREE
   741  						createTime,                 // CREATE_TIME
   742  						nil,                        // UFIDelATE_TIME
   743  						nil,                        // CHECK_TIME
   744  						nil,                        // CHECKSUM
   745  						pi.Comment,                 // PARTITION_COMMENT
   746  						nil,                        // NODEGROUP
   747  						nil,                        // TABLESPACE_NAME
   748  					)
   749  					rows = append(rows, record)
   750  				}
   751  			}
   752  		}
   753  	}
   754  	e.rows = rows
   755  	return nil
   756  }
   757  
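        // setDataFromIndexes builds the MilevaDB INDEXES rows: an implicit PRIMARY entry
        // for integer primary-key handles plus one event per defCausumn of every public index.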
   758  func (e *memblockRetriever) setDataFromIndexes(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) {
   759  	checker := privilege.GetPrivilegeManager(ctx)
   760  	var rows [][]types.Causet
   761  	for _, schemaReplicant := range schemas {
   762  		for _, tb := range schemaReplicant.Blocks {
   763  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, tb.Name.L, "", allegrosql.AllPrivMask) {
   764  				continue
   765  			}
   766  
   767  			if tb.PKIsHandle {
   768  				var pkDefCaus *perceptron.DeferredCausetInfo
   769  				for _, defCaus := range tb.DefCauss() {
   770  					if allegrosql.HasPriKeyFlag(defCaus.Flag) {
   771  						pkDefCaus = defCaus
   772  						break
   773  					}
   774  				}
   775  				record := types.MakeCausets(
   776  					schemaReplicant.Name.O, // TABLE_SCHEMA
   777  					tb.Name.O,              // TABLE_NAME
   778  					0,                      // NON_UNIQUE
   779  					"PRIMARY",              // KEY_NAME
   780  					1,                      // SEQ_IN_INDEX
   781  					pkDefCaus.Name.O,       // COLUMN_NAME
   782  					nil,                    // SUB_PART
   783  					"",                     // INDEX_COMMENT
   784  					nil,                    // Expression
   785  					0,                      // INDEX_ID
   786  					"YES",                  // IS_VISIBLE
   787  				)
   788  				rows = append(rows, record)
   789  			}
   790  			for _, idxInfo := range tb.Indices {
   791  				if idxInfo.State != perceptron.StatePublic {
   792  					continue
   793  				}
   794  				for i, defCaus := range idxInfo.DeferredCausets {
   795  					nonUniq := 1
   796  					if idxInfo.Unique {
   797  						nonUniq = 0
   798  					}
   799  					var subPart interface{}
   800  					if defCaus.Length != types.UnspecifiedLength {
   801  						subPart = defCaus.Length
   802  					}
   803  					defCausName := defCaus.Name.O
   804  					var memex interface{}
   805  					memex = nil
   806  					tblDefCaus := tb.DeferredCausets[defCaus.Offset]
   807  					if tblDefCaus.Hidden {
   808  						defCausName = "NULL"
   809  						memex = fmt.Sprintf("(%s)", tblDefCaus.GeneratedExprString)
   810  					}
   811  					visible := "YES"
   812  					if idxInfo.Invisible {
   813  						visible = "NO"
   814  					}
   815  					record := types.MakeCausets(
   816  						schemaReplicant.Name.O, // TABLE_SCHEMA
   817  						tb.Name.O,              // TABLE_NAME
   818  						nonUniq,                // NON_UNIQUE
   819  						idxInfo.Name.O,         // KEY_NAME
   820  						i+1,                    // SEQ_IN_INDEX
   821  						defCausName,            // COLUMN_NAME
   822  						subPart,                // SUB_PART
   823  						idxInfo.Comment,        // INDEX_COMMENT
   824  						memex,                  // Expression
   825  						idxInfo.ID,             // INDEX_ID
   826  						visible,                // IS_VISIBLE
   827  					)
   828  					rows = append(rows, record)
   829  				}
   830  			}
   831  		}
   832  	}
   833  	e.rows = rows
   834  }
   835  
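        // setDataFromViews builds the VIEWS rows for every view definition the current
        // user is allowed to see, falling back to the default charset and defCauslation.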
   836  func (e *memblockRetriever) setDataFromViews(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) {
   837  	checker := privilege.GetPrivilegeManager(ctx)
   838  	var rows [][]types.Causet
   839  	for _, schemaReplicant := range schemas {
   840  		for _, causet := range schemaReplicant.Blocks {
   841  			if !causet.IsView() {
   842  				continue
   843  			}
   844  			defCauslation := causet.DefCauslate
   845  			charset := causet.Charset
   846  			if defCauslation == "" {
   847  				defCauslation = allegrosql.DefaultDefCauslationName
   848  			}
   849  			if charset == "" {
   850  				charset = allegrosql.DefaultCharset
   851  			}
   852  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, causet.Name.L, "", allegrosql.AllPrivMask) {
   853  				continue
   854  			}
   855  			record := types.MakeCausets(
   856  				schemareplicant.CatalogVal,       // TABLE_CATALOG
   857  				schemaReplicant.Name.O,           // TABLE_SCHEMA
   858  				causet.Name.O,                    // TABLE_NAME
   859  				causet.View.SelectStmt,           // VIEW_DEFINITION
   860  				causet.View.CheckOption.String(), // CHECK_OPTION
   861  				"NO",                             // IS_UFIDelATABLE
   862  				causet.View.Definer.String(),     // DEFINER
   863  				causet.View.Security.String(),    // SECURITY_TYPE
   864  				charset,                          // CHARACTER_SET_CLIENT
   865  				defCauslation,                    // COLLATION_CONNECTION
   866  			)
   867  			rows = append(rows, record)
   868  		}
   869  	}
   870  	e.rows = rows
   871  }
   872  
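        // dataForEinsteinDBStoreStatus fetches the status of every causetstore through the
        // EinsteinDB helper and turns each store's labels, capacity and region/leader
        // metrics into one event.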
   873  func (e *memblockRetriever) dataForEinsteinDBStoreStatus(ctx stochastikctx.Context) (err error) {
   874  	einsteindbStore, ok := ctx.GetStore().(einsteindb.CausetStorage)
   875  	if !ok {
   876  		return errors.New("Information about EinsteinDB causetstore status is only available when the storage is EinsteinDB")
   877  	}
   878  	einsteindbHelper := &helper.Helper{
   879  		CausetStore: einsteindbStore,
   880  		RegionCache: einsteindbStore.GetRegionCache(),
   881  	}
   882  	storesStat, err := einsteindbHelper.GetStoresStat()
   883  	if err != nil {
   884  		return err
   885  	}
   886  	for _, storeStat := range storesStat.Stores {
   887  		event := make([]types.Causet, len(schemareplicant.BlockEinsteinDBStoreStatusDefCauss))
   888  		event[0].SetInt64(storeStat.CausetStore.ID)
   889  		event[1].SetString(storeStat.CausetStore.Address, allegrosql.DefaultDefCauslationName)
   890  		event[2].SetInt64(storeStat.CausetStore.State)
   891  		event[3].SetString(storeStat.CausetStore.StateName, allegrosql.DefaultDefCauslationName)
   892  		data, err := json.Marshal(storeStat.CausetStore.Labels)
   893  		if err != nil {
   894  			return err
   895  		}
   896  		bj := binaryJson.BinaryJSON{}
   897  		if err = bj.UnmarshalJSON(data); err != nil {
   898  			return err
   899  		}
   900  		event[4].SetMysqlJSON(bj)
   901  		event[5].SetString(storeStat.CausetStore.Version, allegrosql.DefaultDefCauslationName)
   902  		event[6].SetString(storeStat.Status.Capacity, allegrosql.DefaultDefCauslationName)
   903  		event[7].SetString(storeStat.Status.Available, allegrosql.DefaultDefCauslationName)
   904  		event[8].SetInt64(storeStat.Status.LeaderCount)
   905  		event[9].SetFloat64(storeStat.Status.LeaderWeight)
   906  		event[10].SetFloat64(storeStat.Status.LeaderSembedded)
   907  		event[11].SetInt64(storeStat.Status.LeaderSize)
   908  		event[12].SetInt64(storeStat.Status.RegionCount)
   909  		event[13].SetFloat64(storeStat.Status.RegionWeight)
   910  		event[14].SetFloat64(storeStat.Status.RegionSembedded)
   911  		event[15].SetInt64(storeStat.Status.RegionSize)
   912  		startTs := types.NewTime(types.FromGoTime(storeStat.Status.StartTs), allegrosql.TypeDatetime, types.DefaultFsp)
   913  		event[16].SetMysqlTime(startTs)
   914  		lastHeartbeatTs := types.NewTime(types.FromGoTime(storeStat.Status.LastHeartbeatTs), allegrosql.TypeDatetime, types.DefaultFsp)
   915  		event[17].SetMysqlTime(lastHeartbeatTs)
   916  		event[18].SetString(storeStat.Status.Uptime, allegrosql.DefaultDefCauslationName)
   917  		e.rows = append(e.rows, event)
   918  	}
   919  	return nil
   920  }
   921  
   922  // DBSJobsReaderInterDirc retrieves information about DBS jobs.
   923  type DBSJobsReaderInterDirc struct {
   924  	baseInterlockingDirectorate
   925  	DBSJobRetriever
   926  
   927  	cacheJobs []*perceptron.Job
   928  	is        schemareplicant.SchemaReplicant
   929  }
   930  
   931  // Open implements the InterlockingDirectorate Open interface.
   932  func (e *DBSJobsReaderInterDirc) Open(ctx context.Context) error {
   933  	if err := e.baseInterlockingDirectorate.Open(ctx); err != nil {
   934  		return err
   935  	}
   936  	txn, err := e.ctx.Txn(true)
   937  	if err != nil {
   938  		return err
   939  	}
   940  	e.DBSJobRetriever.is = e.is
   941  	e.activeRoles = e.ctx.GetStochastikVars().ActiveRoles
   942  	err = e.DBSJobRetriever.initial(txn)
   943  	if err != nil {
   944  		return err
   945  	}
   946  	return nil
   947  }
   948  
   949  // Next implements the InterlockingDirectorate Next interface.
   950  func (e *DBSJobsReaderInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
   951  	req.GrowAndReset(e.maxChunkSize)
   952  	checker := privilege.GetPrivilegeManager(e.ctx)
   953  	count := 0
   954  
   955  	// Append running DBS jobs.
   956  	if e.cursor < len(e.runningJobs) {
   957  		num := mathutil.Min(req.Capacity(), len(e.runningJobs)-e.cursor)
   958  		for i := e.cursor; i < e.cursor+num; i++ {
   959  			e.appendJobToChunk(req, e.runningJobs[i], checker)
   960  			req.AppendString(11, e.runningJobs[i].Query)
   961  		}
   962  		e.cursor += num
   963  		count += num
   964  	}
   965  	var err error
   966  
   967  	// Append history DBS jobs.
   968  	if count < req.Capacity() {
   969  		e.cacheJobs, err = e.historyJobIter.GetLastJobs(req.Capacity()-count, e.cacheJobs)
   970  		if err != nil {
   971  			return err
   972  		}
   973  		for _, job := range e.cacheJobs {
   974  			e.appendJobToChunk(req, job, checker)
   975  			req.AppendString(11, job.Query)
   976  		}
   977  		e.cursor += len(e.cacheJobs)
   978  	}
   979  	return nil
   980  }
   981  
   982  func (e *memblockRetriever) setDataFromEngines() {
   983  	var rows [][]types.Causet
   984  	rows = append(rows,
   985  		types.MakeCausets(
   986  			"InnoDB",  // Engine
   987  			"DEFAULT", // Support
   988  			"Supports transactions, event-level locking, and foreign keys", // Comment
   989  			"YES", // Transactions
   990  			"YES", // XA
   991  			"YES", // Savepoints
   992  		),
   993  	)
   994  	e.rows = rows
   995  }
   996  
   997  func (e *memblockRetriever) setDataFromCharacterSets() {
   998  	var rows [][]types.Causet
   999  	charsets := charset.GetSupportedCharsets()
  1000  	for _, charset := range charsets {
  1001  		rows = append(rows,
  1002  			types.MakeCausets(charset.Name, charset.DefaultDefCauslation, charset.Desc, charset.Maxlen),
  1003  		)
  1004  	}
  1005  	e.rows = rows
  1006  }
  1007  
  1008  func (e *memblockRetriever) setDataFromDefCauslations() {
  1009  	var rows [][]types.Causet
  1010  	defCauslations := defCauslate.GetSupportedDefCauslations()
  1011  	for _, defCauslation := range defCauslations {
  1012  		isDefault := ""
  1013  		if defCauslation.IsDefault {
  1014  			isDefault = "Yes"
  1015  		}
  1016  		rows = append(rows,
  1017  			types.MakeCausets(defCauslation.Name, defCauslation.CharsetName, defCauslation.ID, isDefault, "Yes", 1),
  1018  		)
  1019  	}
  1020  	e.rows = rows
  1021  }
  1022  
  1023  func (e *memblockRetriever) dataForDefCauslationCharacterSetApplicability() {
  1024  	var rows [][]types.Causet
  1025  	defCauslations := defCauslate.GetSupportedDefCauslations()
  1026  	for _, defCauslation := range defCauslations {
  1027  		rows = append(rows,
  1028  			types.MakeCausets(defCauslation.Name, defCauslation.CharsetName),
  1029  		)
  1030  	}
  1031  	e.rows = rows
  1032  }
  1033  
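        // dataForMilevaDBClusterInfo lists every server in the cluster with its type,
        // address, version, git hash, start time and uptime.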
  1034  func (e *memblockRetriever) dataForMilevaDBClusterInfo(ctx stochastikctx.Context) error {
  1035  	servers, err := schemareplicant.GetClusterServerInfo(ctx)
  1036  	if err != nil {
  1037  		e.rows = nil
  1038  		return err
  1039  	}
  1040  	rows := make([][]types.Causet, 0, len(servers))
  1041  	for _, server := range servers {
  1042  		startTimeStr := ""
  1043  		upTimeStr := ""
  1044  		if server.StartTimestamp > 0 {
  1045  			startTime := time.Unix(server.StartTimestamp, 0)
  1046  			startTimeStr = startTime.Format(time.RFC3339)
  1047  			upTimeStr = time.Since(startTime).String()
  1048  		}
  1049  		event := types.MakeCausets(
  1050  			server.ServerType,
  1051  			server.Address,
  1052  			server.StatusAddr,
  1053  			server.Version,
  1054  			server.GitHash,
  1055  			startTimeStr,
  1056  			upTimeStr,
  1057  		)
  1058  		rows = append(rows, event)
  1059  	}
  1060  	e.rows = rows
  1061  	return nil
  1062  }
  1063  
  1064  func (e *memblockRetriever) setDataFromKeyDeferredCausetUsage(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) {
  1065  	checker := privilege.GetPrivilegeManager(ctx)
  1066  	rows := make([][]types.Causet, 0, len(schemas)) // The capacity is not accurate, but it is not a big problem.
  1067  	for _, schemaReplicant := range schemas {
  1068  		for _, causet := range schemaReplicant.Blocks {
  1069  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, causet.Name.L, "", allegrosql.AllPrivMask) {
  1070  				continue
  1071  			}
  1072  			rs := keyDeferredCausetUsageInBlock(schemaReplicant, causet)
  1073  			rows = append(rows, rs...)
  1074  		}
  1075  	}
  1076  	e.rows = rows
  1077  }
  1078  
  1079  func (e *memblockRetriever) setDataForClusterProcessList(ctx stochastikctx.Context) error {
  1080  	e.setDataForProcessList(ctx)
  1081  	rows, err := schemareplicant.AppendHostInfoToEvents(e.rows)
  1082  	if err != nil {
  1083  		return err
  1084  	}
  1085  	e.rows = rows
  1086  	return nil
  1087  }
  1088  
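        // setDataForProcessList builds the PROCESSLIST rows from the stochastik manager,
        // showing every stochastik to holders of the PROCESS privilege and only the
        // current user's stochastik otherwise.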
  1089  func (e *memblockRetriever) setDataForProcessList(ctx stochastikctx.Context) {
  1090  	sm := ctx.GetStochastikManager()
  1091  	if sm == nil {
  1092  		return
  1093  	}
  1094  
  1095  	loginUser := ctx.GetStochastikVars().User
  1096  	var hasProcessPriv bool
  1097  	if pm := privilege.GetPrivilegeManager(ctx); pm != nil {
  1098  		if pm.RequestVerification(ctx.GetStochastikVars().ActiveRoles, "", "", "", allegrosql.ProcessPriv) {
  1099  			hasProcessPriv = true
  1100  		}
  1101  	}
  1102  
  1103  	pl := sm.ShowProcessList()
  1104  
  1105  	records := make([][]types.Causet, 0, len(pl))
  1106  	for _, pi := range pl {
  1107  		// If you have the PROCESS privilege, you can see all threads.
  1108  		// Otherwise, you can see only your own threads.
  1109  		if !hasProcessPriv && loginUser != nil && pi.User != loginUser.Username {
  1110  			continue
  1111  		}
  1112  
  1113  		rows := pi.ToEvent(ctx.GetStochastikVars().StmtCtx.TimeZone)
  1114  		record := types.MakeCausets(rows...)
  1115  		records = append(records, record)
  1116  	}
  1117  	e.rows = records
  1118  }
  1119  
  1120  func (e *memblockRetriever) setDataFromUserPrivileges(ctx stochastikctx.Context) {
  1121  	pm := privilege.GetPrivilegeManager(ctx)
  1122  	e.rows = pm.UserPrivilegesBlock()
  1123  }
  1124  
  1125  func (e *memblockRetriever) setDataForMetricBlocks(ctx stochastikctx.Context) {
  1126  	var rows [][]types.Causet
  1127  	blocks := make([]string, 0, len(schemareplicant.MetricBlockMap))
  1128  	for name := range schemareplicant.MetricBlockMap {
  1129  		blocks = append(blocks, name)
  1130  	}
  1131  	sort.Strings(blocks)
  1132  	for _, name := range blocks {
  1133  		schemaReplicant := schemareplicant.MetricBlockMap[name]
  1134  		record := types.MakeCausets(
  1135  			name,                   // METRICS_NAME
  1136  			schemaReplicant.PromQL, // PROMQL
  1137  			strings.Join(schemaReplicant.Labels, ","), // LABELS
  1138  			schemaReplicant.Quantile,                  // QUANTILE
  1139  			schemaReplicant.Comment,                   // COMMENT
  1140  		)
  1141  		rows = append(rows, record)
  1142  	}
  1143  	e.rows = rows
  1144  }
  1145  
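        // keyDeferredCausetUsageInBlock builds the KEY_COLUMN_USAGE rows of one causet,
        // covering the integer primary-key handle, unique and primary indexes, and
        // foreign keys.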
  1146  func keyDeferredCausetUsageInBlock(schemaReplicant *perceptron.DBInfo, causet *perceptron.BlockInfo) [][]types.Causet {
  1147  	var rows [][]types.Causet
  1148  	if causet.PKIsHandle {
  1149  		for _, defCaus := range causet.DeferredCausets {
  1150  			if allegrosql.HasPriKeyFlag(defCaus.Flag) {
  1151  				record := types.MakeCausets(
  1152  					schemareplicant.CatalogVal,        // CONSTRAINT_CATALOG
  1153  					schemaReplicant.Name.O,            // CONSTRAINT_SCHEMA
  1154  					schemareplicant.PrimaryConstraint, // CONSTRAINT_NAME
  1155  					schemareplicant.CatalogVal,        // TABLE_CATALOG
  1156  					schemaReplicant.Name.O,            // TABLE_SCHEMA
  1157  					causet.Name.O,                     // TABLE_NAME
  1158  					defCaus.Name.O,                    // COLUMN_NAME
  1159  					1,                                 // ORDINAL_POSITION
  1160  					1,                                 // POSITION_IN_UNIQUE_CONSTRAINT
  1161  					nil,                               // REFERENCED_TABLE_SCHEMA
  1162  					nil,                               // REFERENCED_TABLE_NAME
  1163  					nil,                               // REFERENCED_COLUMN_NAME
  1164  				)
  1165  				rows = append(rows, record)
  1166  				break
  1167  			}
  1168  		}
  1169  	}
  1170  	nameToDefCaus := make(map[string]*perceptron.DeferredCausetInfo, len(causet.DeferredCausets))
  1171  	for _, c := range causet.DeferredCausets {
  1172  		nameToDefCaus[c.Name.L] = c
  1173  	}
  1174  	for _, index := range causet.Indices {
  1175  		var idxName string
  1176  		if index.Primary {
  1177  			idxName = schemareplicant.PrimaryConstraint
  1178  		} else if index.Unique {
  1179  			idxName = index.Name.O
  1180  		} else {
  1181  			// Only handle unique/primary key
  1182  			continue
  1183  		}
  1184  		for i, key := range index.DeferredCausets {
  1185  			defCaus := nameToDefCaus[key.Name.L]
  1186  			record := types.MakeCausets(
  1187  				schemareplicant.CatalogVal, // CONSTRAINT_CATALOG
  1188  				schemaReplicant.Name.O,     // CONSTRAINT_SCHEMA
  1189  				idxName,                    // CONSTRAINT_NAME
  1190  				schemareplicant.CatalogVal, // TABLE_CATALOG
  1191  				schemaReplicant.Name.O,     // TABLE_SCHEMA
  1192  				causet.Name.O,              // TABLE_NAME
  1193  				defCaus.Name.O,             // COLUMN_NAME
  1194  				i+1,                        // ORDINAL_POSITION,
  1195  				nil,                        // POSITION_IN_UNIQUE_CONSTRAINT
  1196  				nil,                        // REFERENCED_TABLE_SCHEMA
  1197  				nil,                        // REFERENCED_TABLE_NAME
  1198  				nil,                        // REFERENCED_COLUMN_NAME
  1199  			)
  1200  			rows = append(rows, record)
  1201  		}
  1202  	}
  1203  	for _, fk := range causet.ForeignKeys {
  1204  		fkRefDefCaus := ""
  1205  		if len(fk.RefDefCauss) > 0 {
  1206  			fkRefDefCaus = fk.RefDefCauss[0].O
  1207  		}
  1208  		for i, key := range fk.DefCauss {
  1209  			defCaus := nameToDefCaus[key.L]
  1210  			record := types.MakeCausets(
  1211  				schemareplicant.CatalogVal, // CONSTRAINT_CATALOG
  1212  				schemaReplicant.Name.O,     // CONSTRAINT_SCHEMA
  1213  				fk.Name.O,                  // CONSTRAINT_NAME
  1214  				schemareplicant.CatalogVal, // TABLE_CATALOG
  1215  				schemaReplicant.Name.O,     // TABLE_SCHEMA
  1216  				causet.Name.O,              // TABLE_NAME
  1217  				defCaus.Name.O,             // COLUMN_NAME
  1218  				i+1,                        // ORDINAL_POSITION,
  1219  				1,                          // POSITION_IN_UNIQUE_CONSTRAINT
  1220  				schemaReplicant.Name.O,     // REFERENCED_TABLE_SCHEMA
  1221  				fk.RefBlock.O,              // REFERENCED_TABLE_NAME
  1222  				fkRefDefCaus,               // REFERENCED_COLUMN_NAME
  1223  			)
  1224  			rows = append(rows, record)
  1225  		}
  1226  	}
  1227  	return rows
  1228  }
  1229  
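        // setDataForEinsteinDBRegionStatus lists every region known to the helper and emits
        // one event per (region, causet) pair, or a single event with NULL causet defCausumns
        // when the region maps to no causet.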
  1230  func (e *memblockRetriever) setDataForEinsteinDBRegionStatus(ctx stochastikctx.Context) error {
  1231  	einsteindbStore, ok := ctx.GetStore().(einsteindb.CausetStorage)
  1232  	if !ok {
  1233  		return errors.New("Information about EinsteinDB region status is only available when the storage is EinsteinDB")
  1234  	}
  1235  	einsteindbHelper := &helper.Helper{
  1236  		CausetStore: einsteindbStore,
  1237  		RegionCache: einsteindbStore.GetRegionCache(),
  1238  	}
  1239  	regionsInfo, err := einsteindbHelper.GetRegionsInfo()
  1240  	if err != nil {
  1241  		return err
  1242  	}
  1243  	allSchemas := ctx.GetStochastikVars().TxnCtx.SchemaReplicant.(schemareplicant.SchemaReplicant).AllSchemas()
  1244  	blockInfos := einsteindbHelper.GetRegionsBlockInfo(regionsInfo, allSchemas)
  1245  	for _, region := range regionsInfo.Regions {
  1246  		blockList := blockInfos[region.ID]
  1247  		if len(blockList) == 0 {
  1248  			e.setNewEinsteinDBRegionStatusDefCaus(&region, nil)
  1249  		}
  1250  		for _, causet := range blockList {
  1251  			e.setNewEinsteinDBRegionStatusDefCaus(&region, &causet)
  1252  		}
  1253  	}
  1254  	return nil
  1255  }
  1256  
  1257  func (e *memblockRetriever) setNewEinsteinDBRegionStatusDefCaus(region *helper.RegionInfo, causet *helper.BlockInfo) {
  1258  	event := make([]types.Causet, len(schemareplicant.BlockEinsteinDBRegionStatusDefCauss))
  1259  	event[0].SetInt64(region.ID)
  1260  	event[1].SetString(region.StartKey, allegrosql.DefaultDefCauslationName)
  1261  	event[2].SetString(region.EndKey, allegrosql.DefaultDefCauslationName)
  1262  	if causet != nil {
  1263  		event[3].SetInt64(causet.Block.ID)
  1264  		event[4].SetString(causet.EDB.Name.O, allegrosql.DefaultDefCauslationName)
  1265  		event[5].SetString(causet.Block.Name.O, allegrosql.DefaultDefCauslationName)
  1266  		if causet.IsIndex {
  1267  			event[6].SetInt64(1)
  1268  			event[7].SetInt64(causet.Index.ID)
  1269  			event[8].SetString(causet.Index.Name.O, allegrosql.DefaultDefCauslationName)
  1270  		} else {
  1271  			event[6].SetInt64(0)
  1272  		}
  1273  	}
  1274  	event[9].SetInt64(region.Epoch.ConfVer)
  1275  	event[10].SetInt64(region.Epoch.Version)
  1276  	event[11].SetInt64(region.WrittenBytes)
  1277  	event[12].SetInt64(region.ReadBytes)
  1278  	event[13].SetInt64(region.ApproximateSize)
  1279  	event[14].SetInt64(region.ApproximateKeys)
  1280  	if region.ReplicationStatus != nil {
  1281  		event[15].SetString(region.ReplicationStatus.State, allegrosql.DefaultDefCauslationName)
  1282  		event[16].SetInt64(region.ReplicationStatus.StateID)
  1283  	}
  1284  	e.rows = append(e.rows, event)
  1285  }
  1286  
  1287  func (e *memblockRetriever) setDataForEinsteinDBRegionPeers(ctx stochastikctx.Context) error {
  1288  	einsteindbStore, ok := ctx.GetStore().(einsteindb.CausetStorage)
  1289  	if !ok {
  1290  		return errors.New("Information about EinsteinDB region peers is only available when the storage is EinsteinDB")
  1291  	}
  1292  	einsteindbHelper := &helper.Helper{
  1293  		CausetStore: einsteindbStore,
  1294  		RegionCache: einsteindbStore.GetRegionCache(),
  1295  	}
  1296  	regionsInfo, err := einsteindbHelper.GetRegionsInfo()
  1297  	if err != nil {
  1298  		return err
  1299  	}
  1300  	for _, region := range regionsInfo.Regions {
  1301  		e.setNewEinsteinDBRegionPeersDefCauss(&region)
  1302  	}
  1303  	return nil
  1304  }
  1305  
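        // setNewEinsteinDBRegionPeersDefCauss emits one event per peer of the region and
        // classifies each peer as PENDING, DOWN (with its down time in seconds) or NORMAL.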
  1306  func (e *memblockRetriever) setNewEinsteinDBRegionPeersDefCauss(region *helper.RegionInfo) {
  1307  	records := make([][]types.Causet, 0, len(region.Peers))
  1308  	pendingPeerIDSet := set.NewInt64Set()
  1309  	for _, peer := range region.PendingPeers {
  1310  		pendingPeerIDSet.Insert(peer.ID)
  1311  	}
  1312  	downPeerMap := make(map[int64]int64, len(region.DownPeers))
  1313  	for _, peerStat := range region.DownPeers {
  1314  		downPeerMap[peerStat.ID] = peerStat.DownSec
  1315  	}
  1316  	for _, peer := range region.Peers {
  1317  		event := make([]types.Causet, len(schemareplicant.BlockEinsteinDBRegionPeersDefCauss))
  1318  		event[0].SetInt64(region.ID)
  1319  		event[1].SetInt64(peer.ID)
  1320  		event[2].SetInt64(peer.StoreID)
  1321  		if peer.IsLearner {
  1322  			event[3].SetInt64(1)
  1323  		} else {
  1324  			event[3].SetInt64(0)
  1325  		}
  1326  		if peer.ID == region.Leader.ID {
  1327  			event[4].SetInt64(1)
  1328  		} else {
  1329  			event[4].SetInt64(0)
  1330  		}
  1331  		if pendingPeerIDSet.Exist(peer.ID) {
  1332  			event[5].SetString(pendingPeer, allegrosql.DefaultDefCauslationName)
  1333  		} else if downSec, ok := downPeerMap[peer.ID]; ok {
  1334  			event[5].SetString(downPeer, allegrosql.DefaultDefCauslationName)
  1335  			event[6].SetInt64(downSec)
  1336  		} else {
  1337  			event[5].SetString(normalPeer, allegrosql.DefaultDefCauslationName)
  1338  		}
  1339  		records = append(records, event)
  1340  	}
  1341  	e.rows = append(e.rows, records...)
  1342  }
  1343  
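// Peer status strings used by setNewEinsteinDBRegionPeersDefCauss above.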
  1344  const (
  1345  	normalPeer  = "NORMAL"
  1346  	pendingPeer = "PENDING"
  1347  	downPeer    = "DOWN"
  1348  )
  1349  
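// setDataForMilevaDBHotRegions scrapes both the hot-read and hot-write metrics from FIDel and
// turns them into events via setDataForHotRegionByMetrics.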
  1350  func (e *memblockRetriever) setDataForMilevaDBHotRegions(ctx stochastikctx.Context) error {
  1351  	einsteindbStore, ok := ctx.GetStore().(einsteindb.CausetStorage)
  1352  	if !ok {
  1353  		return errors.New("Information about hot regions is available only when the storage engine is EinsteinDB")
  1354  	}
  1355  	allSchemas := ctx.GetStochastikVars().TxnCtx.SchemaReplicant.(schemareplicant.SchemaReplicant).AllSchemas()
  1356  	einsteindbHelper := &helper.Helper{
  1357  		CausetStore: einsteindbStore,
  1358  		RegionCache: einsteindbStore.GetRegionCache(),
  1359  	}
  1360  	metrics, err := einsteindbHelper.ScrapeHotInfo(FIDelapi.HotRead, allSchemas)
  1361  	if err != nil {
  1362  		return err
  1363  	}
  1364  	e.setDataForHotRegionByMetrics(metrics, "read")
  1365  	metrics, err = einsteindbHelper.ScrapeHotInfo(FIDelapi.HotWrite, allSchemas)
  1366  	if err != nil {
  1367  		return err
  1368  	}
  1369  	e.setDataForHotRegionByMetrics(metrics, "write")
  1370  	return nil
  1371  }
  1372  
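// setDataForHotRegionByMetrics converts scraped hot-region metrics into events; tp is "read" or
// "write" and records which kind of hotspot each event describes.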
  1373  func (e *memblockRetriever) setDataForHotRegionByMetrics(metrics []helper.HotBlockIndex, tp string) {
  1374  	rows := make([][]types.Causet, 0, len(metrics))
  1375  	for _, tblIndex := range metrics {
  1376  		event := make([]types.Causet, len(schemareplicant.BlockMilevaDBHotRegionsDefCauss))
  1377  		if tblIndex.IndexName != "" {
  1378  			event[1].SetInt64(tblIndex.IndexID)
  1379  			event[4].SetString(tblIndex.IndexName, allegrosql.DefaultDefCauslationName)
  1380  		} else {
  1381  			event[1].SetNull()
  1382  			event[4].SetNull()
  1383  		}
  1384  		event[0].SetInt64(tblIndex.BlockID)
  1385  		event[2].SetString(tblIndex.DbName, allegrosql.DefaultDefCauslationName)
  1386  		event[3].SetString(tblIndex.BlockName, allegrosql.DefaultDefCauslationName)
  1387  		event[5].SetUint64(tblIndex.RegionID)
  1388  		event[6].SetString(tp, allegrosql.DefaultDefCauslationName)
  1389  		if tblIndex.RegionMetric == nil {
  1390  			event[7].SetNull()
  1391  			event[8].SetNull() // event[9] is also left as NULL in this case
  1392  		} else {
  1393  			event[7].SetInt64(int64(tblIndex.RegionMetric.MaxHotDegree))
  1394  			event[8].SetInt64(int64(tblIndex.RegionMetric.Count))
  1395  			event[9].SetUint64(tblIndex.RegionMetric.FlowBytes)
  1396  		}
  1397  		rows = append(rows, event)
  1398  	}
  1399  	e.rows = append(e.rows, rows...)
  1400  }
  1401  
  1402  // setDataFromBlockConstraints constructs data for the causet information_schema.constraints. See https://dev.allegrosql.com/doc/refman/5.7/en/causet-constraints-causet.html
  1403  func (e *memblockRetriever) setDataFromBlockConstraints(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) {
  1404  	checker := privilege.GetPrivilegeManager(ctx)
  1405  	var rows [][]types.Causet
  1406  	for _, schemaReplicant := range schemas {
  1407  		for _, tbl := range schemaReplicant.Blocks {
  1408  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, tbl.Name.L, "", allegrosql.AllPrivMask) {
  1409  				continue
  1410  			}
  1411  
  1412  			if tbl.PKIsHandle {
  1413  				record := types.MakeCausets(
  1414  					schemareplicant.CatalogVal,     // CONSTRAINT_CATALOG
  1415  					schemaReplicant.Name.O,         // CONSTRAINT_SCHEMA
  1416  					allegrosql.PrimaryKeyName,      // CONSTRAINT_NAME
  1417  					schemaReplicant.Name.O,         // TABLE_SCHEMA
  1418  					tbl.Name.O,                     // TABLE_NAME
  1419  					schemareplicant.PrimaryKeyType, // CONSTRAINT_TYPE
  1420  				)
  1421  				rows = append(rows, record)
  1422  			}
  1423  
  1424  			for _, idx := range tbl.Indices {
  1425  				var cname, ctype string
  1426  				if idx.Primary {
  1427  					cname = allegrosql.PrimaryKeyName
  1428  					ctype = schemareplicant.PrimaryKeyType
  1429  				} else if idx.Unique {
  1430  					cname = idx.Name.O
  1431  					ctype = schemareplicant.UniqueKeyType
  1432  				} else {
  1433  				// The index has no constraint.
  1434  					continue
  1435  				}
  1436  				record := types.MakeCausets(
  1437  					schemareplicant.CatalogVal, // CONSTRAINT_CATALOG
  1438  					schemaReplicant.Name.O,     // CONSTRAINT_SCHEMA
  1439  					cname,                      // CONSTRAINT_NAME
  1440  					schemaReplicant.Name.O,     // TABLE_SCHEMA
  1441  					tbl.Name.O,                 // TABLE_NAME
  1442  					ctype,                      // CONSTRAINT_TYPE
  1443  				)
  1444  				rows = append(rows, record)
  1445  			}
  1446  		}
  1447  	}
  1448  	e.rows = rows
  1449  }
  1450  
  1451  // blockStorageStatsRetriever is used to read causet storage stats (peer/region counts, storage size and keys) from FIDel.
  1452  type blockStorageStatsRetriever struct {
  1453  	dummyCloser
  1454  	causet         *perceptron.BlockInfo
  1455  	outputDefCauss []*perceptron.DeferredCausetInfo
  1456  	retrieved      bool
  1457  	initialized    bool
  1458  	extractor      *causetembedded.BlockStorageStatsExtractor
  1459  	initialBlocks  []*initialBlock
  1460  	curBlock       int
  1461  	helper         *helper.Helper
  1462  	stats          helper.FIDelRegionStats
  1463  }
  1464  
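// retrieve implements the schemareplicantRetriever interface. It initializes lazily on the first
// call, then returns batches of at most 1024 events, projected down to the requested output
// defCausumns.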
  1465  func (e *blockStorageStatsRetriever) retrieve(ctx context.Context, sctx stochastikctx.Context) ([][]types.Causet, error) {
  1466  	if e.retrieved {
  1467  		return nil, nil
  1468  	}
  1469  	if !e.initialized {
  1470  		err := e.initialize(sctx)
  1471  		if err != nil {
  1472  			return nil, err
  1473  		}
  1474  	}
  1475  	if len(e.initialBlocks) == 0 || e.curBlock >= len(e.initialBlocks) {
  1476  		e.retrieved = true
  1477  		return nil, nil
  1478  	}
  1479  
  1480  	rows, err := e.setDataForBlockStorageStats(sctx)
  1481  	if err != nil {
  1482  		return nil, err
  1483  	}
  1484  	if len(e.outputDefCauss) == len(e.causet.DeferredCausets) {
  1485  		return rows, nil
  1486  	}
  1487  	retEvents := make([][]types.Causet, len(rows))
  1488  	for i, fullEvent := range rows {
  1489  		event := make([]types.Causet, len(e.outputDefCauss))
  1490  		for j, defCaus := range e.outputDefCauss {
  1491  			event[j] = fullEvent[defCaus.Offset]
  1492  		}
  1493  		retEvents[i] = event
  1494  	}
  1495  	return retEvents, nil
  1496  }
  1497  
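// initialBlock pairs a database name with the BlockInfo of one causet whose storage stats will be
// retrieved.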
  1498  type initialBlock struct {
  1499  	EDB string
  1500  	*perceptron.BlockInfo
  1501  }
  1502  
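// initialize validates the pushed-down block_schema/block_name conditions, collects the matching
// blocks from the schemaReplicant, and caches a FIDel-backed helper.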
  1503  func (e *blockStorageStatsRetriever) initialize(sctx stochastikctx.Context) error {
  1504  	is := schemareplicant.GetSchemaReplicant(sctx)
  1505  	var databases []string
  1506  	schemas := e.extractor.BlockSchema
  1507  	blocks := e.extractor.BlockName
  1508  
  1509  	// If block_schema is not specified, return an error to avoid traversing all schemas and their blocks.
  1510  	if len(schemas) == 0 {
  1511  		return errors.Errorf("Please specify the 'block_schema'")
  1512  	}
  1513  
  1514  	// Filter the sys or memory schemaReplicant.
  1515  	for schemaReplicant := range schemas {
  1516  		if !soliton.IsMemOrSysDB(schemaReplicant) {
  1517  			databases = append(databases, schemaReplicant)
  1518  		}
  1519  	}
  1520  
  1521  	// Extract the blocks to the initialBlock.
  1522  	for _, EDB := range databases {
  1523  		// The user didn't specify any causet, so extract all blocks of this EDB into initialBlock.
  1524  		if len(blocks) == 0 {
  1525  			tbs := is.SchemaBlocks(perceptron.NewCIStr(EDB))
  1526  			for _, tb := range tbs {
  1527  				e.initialBlocks = append(e.initialBlocks, &initialBlock{EDB, tb.Meta()})
  1528  			}
  1529  		} else {
  1530  			// The user specified causets, so extract only those blocks of this EDB into initialBlock.
  1531  			for tb := range blocks {
  1532  				if tb, err := is.BlockByName(perceptron.NewCIStr(EDB), perceptron.NewCIStr(tb)); err == nil {
  1533  					e.initialBlocks = append(e.initialBlocks, &initialBlock{EDB, tb.Meta()})
  1534  				}
  1535  			}
  1536  		}
  1537  	}
  1538  
  1539  	// Cache the helper and return an error if FIDel is unavailable.
  1540  	einsteindbStore, ok := sctx.GetStore().(einsteindb.CausetStorage)
  1541  	if !ok {
  1542  		return errors.Errorf("Information about causet storage stats is available only when the storage engine is EinsteinDB")
  1543  	}
  1544  	e.helper = helper.NewHelper(einsteindbStore)
  1545  	_, err := e.helper.GetFIDelAddr()
  1546  	if err != nil {
  1547  		return err
  1548  	}
  1549  	e.initialized = true
  1550  	return nil
  1551  }
  1552  
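// setDataForBlockStorageStats fetches FIDel region stats for up to 1024 blocks per call, advancing
// e.curBlock so the next call resumes where this one stopped.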
  1553  func (e *blockStorageStatsRetriever) setDataForBlockStorageStats(ctx stochastikctx.Context) ([][]types.Causet, error) {
  1554  	rows := make([][]types.Causet, 0, 1024)
  1555  	count := 0
  1556  	for e.curBlock < len(e.initialBlocks) && count < 1024 {
  1557  		causet := e.initialBlocks[e.curBlock]
  1558  		blockID := causet.ID
  1559  		err := e.helper.GetFIDelRegionStats(blockID, &e.stats)
  1560  		if err != nil {
  1561  			return nil, err
  1562  		}
  1563  		peerCount := len(e.stats.StorePeerCount)
  1564  
  1565  		record := types.MakeCausets(
  1566  			causet.EDB,          // TABLE_SCHEMA
  1567  			causet.Name.O,       // TABLE_NAME
  1568  			blockID,             // TABLE_ID
  1569  			peerCount,           // TABLE_PEER_COUNT
  1570  			e.stats.Count,       // TABLE_REGION_COUNT
  1571  			e.stats.EmptyCount,  // TABLE_EMPTY_REGION_COUNT
  1572  			e.stats.StorageSize, // TABLE_SIZE
  1573  			e.stats.StorageKeys, // TABLE_KEYS
  1574  		)
  1575  		rows = append(rows, record)
  1576  		count++
  1577  		e.curBlock++
  1578  	}
  1579  	return rows, nil
  1580  }
  1581  
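// setDataFromStochastikVar emits one event per system variable with its value in the current stochastik.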
  1582  func (e *memblockRetriever) setDataFromStochastikVar(ctx stochastikctx.Context) error {
  1583  	var rows [][]types.Causet
  1584  	var err error
  1585  	stochastikVars := ctx.GetStochastikVars()
  1586  	for _, v := range variable.SysVars {
  1587  		var value string
  1588  		value, err = variable.GetStochastikSystemVar(stochastikVars, v.Name)
  1589  		if err != nil {
  1590  			return err
  1591  		}
  1592  		event := types.MakeCausets(v.Name, value)
  1593  		rows = append(rows, event)
  1594  	}
  1595  	e.rows = rows
  1596  	return nil
  1597  }
  1598  
  1599  // dataForAnalyzeStatusHelper is a helper function which can be used in show_stats.go
  1600  func dataForAnalyzeStatusHelper(sctx stochastikctx.Context) (rows [][]types.Causet) {
  1601  	checker := privilege.GetPrivilegeManager(sctx)
  1602  	for _, job := range statistics.GetAllAnalyzeJobs() {
  1603  		job.Lock()
  1604  		var startTime interface{}
  1605  		if job.StartTime.IsZero() {
  1606  			startTime = nil
  1607  		} else {
  1608  			startTime = types.NewTime(types.FromGoTime(job.StartTime), allegrosql.TypeDatetime, 0)
  1609  		}
  1610  		if checker == nil || checker.RequestVerification(sctx.GetStochastikVars().ActiveRoles, job.DBName, job.BlockName, "", allegrosql.AllPrivMask) {
  1611  			rows = append(rows, types.MakeCausets(
  1612  				job.DBName,        // TABLE_SCHEMA
  1613  				job.BlockName,     // TABLE_NAME
  1614  				job.PartitionName, // PARTITION_NAME
  1615  				job.JobInfo,       // JOB_INFO
  1616  				job.EventCount,    // ROW_COUNT
  1617  				startTime,         // START_TIME
  1618  				job.State,         // STATE
  1619  			))
  1620  		}
  1621  		job.Unlock()
  1622  	}
  1623  	return
  1624  }
  1625  
  1626  // setDataForAnalyzeStatus gets all the analyze jobs.
  1627  func (e *memblockRetriever) setDataForAnalyzeStatus(sctx stochastikctx.Context) {
  1628  	e.rows = dataForAnalyzeStatusHelper(sctx)
  1629  }
  1630  
  1631  // setDataForPseudoProfiling appends pseudo data for the causet profiling when the system variable `profiling` is set to `ON`.
  1632  func (e *memblockRetriever) setDataForPseudoProfiling(sctx stochastikctx.Context) {
  1633  	if v, ok := sctx.GetStochastikVars().GetSystemVar("profiling"); ok && variable.MilevaDBOptOn(v) {
  1634  		event := types.MakeCausets(
  1635  			0,                      // QUERY_ID
  1636  			0,                      // SEQ
  1637  			"",                     // STATE
  1638  			types.NewDecFromInt(0), // DURATION
  1639  			types.NewDecFromInt(0), // CPU_USER
  1640  			types.NewDecFromInt(0), // CPU_SYSTEM
  1641  			0,                      // CONTEXT_VOLUNTARY
  1642  			0,                      // CONTEXT_INVOLUNTARY
  1643  			0,                      // BLOCK_OPS_IN
  1644  			0,                      // BLOCK_OPS_OUT
  1645  			0,                      // MESSAGES_SENT
  1646  			0,                      // MESSAGES_RECEIVED
  1647  			0,                      // PAGE_FAULTS_MAJOR
  1648  			0,                      // PAGE_FAULTS_MINOR
  1649  			0,                      // SWAPS
  1650  			"",                     // SOURCE_FUNCTION
  1651  			"",                     // SOURCE_FILE
  1652  			0,                      // SOURCE_LINE
  1653  		)
  1654  		e.rows = append(e.rows, event)
  1655  	}
  1656  }
  1657  
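// setDataForServersInfo lists every server registered with infosync, including its version, git
// hash, binlog status and labels.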
  1658  func (e *memblockRetriever) setDataForServersInfo() error {
  1659  	serversInfo, err := infosync.GetAllServerInfo(context.Background())
  1660  	if err != nil {
  1661  		return err
  1662  	}
  1663  	rows := make([][]types.Causet, 0, len(serversInfo))
  1664  	for _, info := range serversInfo {
  1665  		event := types.MakeCausets(
  1666  			info.ID,              // DBS_ID
  1667  			info.IP,              // IP
  1668  			int(info.Port),       // PORT
  1669  			int(info.StatusPort), // STATUS_PORT
  1670  			info.Lease,           // LEASE
  1671  			info.Version,         // VERSION
  1672  			info.GitHash,         // GIT_HASH
  1673  			info.BinlogStatus,    // BINLOG_STATUS
  1674  			stringutil.BuildStringFromLabels(info.Labels), // LABELS
  1675  		)
  1676  		rows = append(rows, event)
  1677  	}
  1678  	e.rows = rows
  1679  	return nil
  1680  }
  1681  
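// setDataFromSequences constructs data for the sequences causet, skipping blocks that are not
// sequences as well as blocks the current user has no privilege on.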
  1682  func (e *memblockRetriever) setDataFromSequences(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) {
  1683  	checker := privilege.GetPrivilegeManager(ctx)
  1684  	var rows [][]types.Causet
  1685  	for _, schemaReplicant := range schemas {
  1686  		for _, causet := range schemaReplicant.Blocks {
  1687  			if !causet.IsSequence() {
  1688  				continue
  1689  			}
  1690  			if checker != nil && !checker.RequestVerification(ctx.GetStochastikVars().ActiveRoles, schemaReplicant.Name.L, causet.Name.L, "", allegrosql.AllPrivMask) {
  1691  				continue
  1692  			}
  1693  			record := types.MakeCausets(
  1694  				schemareplicant.CatalogVal, // TABLE_CATALOG
  1695  				schemaReplicant.Name.O,     // TABLE_SCHEMA
  1696  				causet.Name.O,              // TABLE_NAME
  1697  				causet.Sequence.Cache,      // Cache
  1698  				causet.Sequence.CacheValue, // CACHE_VALUE
  1699  				causet.Sequence.Cycle,      // CYCLE
  1700  				causet.Sequence.Increment,  // INCREMENT
  1701  				causet.Sequence.MaxValue,   // MAXVALUE
  1702  				causet.Sequence.MinValue,   // MINVALUE
  1703  				causet.Sequence.Start,      // START
  1704  				causet.Sequence.Comment,    // COMMENT
  1705  			)
  1706  			rows = append(rows, record)
  1707  		}
  1708  	}
  1709  	e.rows = rows
  1710  }
  1711  
  1712  // dataForBlockTiFlashReplica constructs data for causet tiflash replica info.
  1713  func (e *memblockRetriever) dataForBlockTiFlashReplica(ctx stochastikctx.Context, schemas []*perceptron.DBInfo) {
  1714  	var rows [][]types.Causet
  1715  	progressMap, err := infosync.GetTiFlashBlockSyncProgress(context.Background())
  1716  	if err != nil {
  1717  		ctx.GetStochastikVars().StmtCtx.AppendWarning(err)
  1718  	}
  1719  	for _, schemaReplicant := range schemas {
  1720  		for _, tbl := range schemaReplicant.Blocks {
  1721  			if tbl.TiFlashReplica == nil {
  1722  				continue
  1723  			}
  1724  			progress := 1.0
  1725  			if !tbl.TiFlashReplica.Available {
  1726  				if pi := tbl.GetPartitionInfo(); pi != nil && len(pi.Definitions) > 0 {
  1727  					progress = 0
  1728  					for _, p := range pi.Definitions {
  1729  						if tbl.TiFlashReplica.IsPartitionAvailable(p.ID) {
  1730  							progress += 1
  1731  						} else {
  1732  							progress += progressMap[p.ID]
  1733  						}
  1734  					}
  1735  					progress = progress / float64(len(pi.Definitions))
  1736  				} else {
  1737  					progress = progressMap[tbl.ID]
  1738  				}
  1739  			}
  1740  			record := types.MakeCausets(
  1741  				schemaReplicant.Name.O,          // TABLE_SCHEMA
  1742  				tbl.Name.O,                      // TABLE_NAME
  1743  				tbl.ID,                          // TABLE_ID
  1744  				int64(tbl.TiFlashReplica.Count), // REPLICA_COUNT
  1745  				strings.Join(tbl.TiFlashReplica.LocationLabels, ","), // LOCATION_LABELS
  1746  				tbl.TiFlashReplica.Available,                         // AVAILABLE
  1747  				progress,                                             // PROGRESS
  1748  			)
  1749  			rows = append(rows, record)
  1750  		}
  1751  	}
  1752  	e.rows = rows
  1753  	return
  1754  }
  1755  
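// setDataForStatementsSummary fills e.rows from the statement summary map: the current causets use
// ToCurrentCauset, the history causets use ToHistoryCauset, and the cluster variants additionally
// get host information appended to each event.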
  1756  func (e *memblockRetriever) setDataForStatementsSummary(ctx stochastikctx.Context, blockName string) error {
  1757  	user := ctx.GetStochastikVars().User
  1758  	isSuper := false
  1759  	if pm := privilege.GetPrivilegeManager(ctx); pm != nil {
  1760  		isSuper = pm.RequestVerificationWithUser("", "", "", allegrosql.SuperPriv, user)
  1761  	}
  1762  	switch blockName {
  1763  	case schemareplicant.BlockStatementsSummary,
  1764  		schemareplicant.ClusterBlockStatementsSummary:
  1765  		e.rows = stmtsummary.StmtSummaryByDigestMap.ToCurrentCauset(user, isSuper)
  1766  	case schemareplicant.BlockStatementsSummaryHistory,
  1767  		schemareplicant.ClusterBlockStatementsSummaryHistory:
  1768  		e.rows = stmtsummary.StmtSummaryByDigestMap.ToHistoryCauset(user, isSuper)
  1769  	}
  1770  	switch blockName {
  1771  	case schemareplicant.ClusterBlockStatementsSummary,
  1772  		schemareplicant.ClusterBlockStatementsSummaryHistory:
  1773  		rows, err := schemareplicant.AppendHostInfoToEvents(e.rows)
  1774  		if err != nil {
  1775  			return err
  1776  		}
  1777  		e.rows = rows
  1778  	}
  1779  	return nil
  1780  }
  1781  
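// hugeMemBlockRetriever builds rows for very large memory blocks (only the deferred-causets causet
// in the switch below) in batches rather than materializing everything at once; dbsIdx and tblIdx
// presumably track how far the scan has progressed between calls.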
  1782  type hugeMemBlockRetriever struct {
  1783  	dummyCloser
  1784  	causet      *perceptron.BlockInfo
  1785  	defCausumns []*perceptron.DeferredCausetInfo
  1786  	retrieved   bool
  1787  	initialized bool
  1788  	rows        [][]types.Causet
  1789  	dbs         []*perceptron.DBInfo
  1790  	dbsIdx      int
  1791  	tblIdx      int
  1792  }
  1793  
  1794  // retrieve implements the schemareplicantRetriever interface
  1795  func (e *hugeMemBlockRetriever) retrieve(ctx context.Context, sctx stochastikctx.Context) ([][]types.Causet, error) {
  1796  	if e.retrieved {
  1797  		return nil, nil
  1798  	}
  1799  
  1800  	if !e.initialized {
  1801  		is := schemareplicant.GetSchemaReplicant(sctx)
  1802  		dbs := is.AllSchemas()
  1803  		sort.Sort(schemareplicant.SchemasSorter(dbs))
  1804  		e.dbs = dbs
  1805  		e.initialized = true
  1806  		e.rows = make([][]types.Causet, 0, 1024)
  1807  	}
  1808  
  1809  	var err error
  1810  	switch e.causet.Name.O {
  1811  	case schemareplicant.BlockDeferredCausets:
  1812  		err = e.setDataForDeferredCausets(sctx)
  1813  	}
  1814  	if err != nil {
  1815  		return nil, err
  1816  	}
  1817  	e.retrieved = len(e.rows) == 0
  1818  
  1819  	return adjustDeferredCausets(e.rows, e.defCausumns, e.causet), nil
  1820  }
  1821  
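// adjustDeferredCausets projects full events down to the requested output defCausumns using each
// defCausumn's Offset; it is a no-op when every defCausumn of the causet is requested. For example,
// with causet defCausumns (a, b, c) and output defCausumns (c, a), an event [a, b, c] becomes [c, a].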
  1822  func adjustDeferredCausets(input [][]types.Causet, outDeferredCausets []*perceptron.DeferredCausetInfo, causet *perceptron.BlockInfo) [][]types.Causet {
  1823  	if len(outDeferredCausets) == len(causet.DeferredCausets) {
  1824  		return input
  1825  	}
  1826  	rows := make([][]types.Causet, len(input))
  1827  	for i, fullEvent := range input {
  1828  		event := make([]types.Causet, len(outDeferredCausets))
  1829  		for j, defCaus := range outDeferredCausets {
  1830  			event[j] = fullEvent[defCaus.Offset]
  1831  		}
  1832  		rows[i] = event
  1833  	}
  1834  	return rows
  1835  }
  1836  
  1837  // TiFlashSystemBlockRetriever is used to read system causet from tiflash.
  1838  type TiFlashSystemBlockRetriever struct {
  1839  	dummyCloser
  1840  	causet         *perceptron.BlockInfo
  1841  	outputDefCauss []*perceptron.DeferredCausetInfo
  1842  	instanceCount  int
  1843  	instanceIdx    int
  1844  	instanceInfos  []tiflashInstanceInfo
  1845  	rowIdx         int
  1846  	retrieved      bool
  1847  	initialized    bool
  1848  	extractor      *causetembedded.TiFlashSystemBlockExtractor
  1849  }
  1850  
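// retrieve implements the schemareplicantRetriever interface. It lazily discovers the TiFlash
// instances, then pages through each instance's system causet until all instances are exhausted.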
  1851  func (e *TiFlashSystemBlockRetriever) retrieve(ctx context.Context, sctx stochastikctx.Context) ([][]types.Causet, error) {
  1852  	if e.extractor.SkipRequest || e.retrieved {
  1853  		return nil, nil
  1854  	}
  1855  	if !e.initialized {
  1856  		err := e.initialize(sctx, e.extractor.TiFlashInstances)
  1857  		if err != nil {
  1858  			return nil, err
  1859  		}
  1860  	}
  1861  	if e.instanceCount == 0 || e.instanceIdx >= e.instanceCount {
  1862  		e.retrieved = true
  1863  		return nil, nil
  1864  	}
  1865  
  1866  	for {
  1867  		rows, err := e.dataForTiFlashSystemBlocks(sctx, e.extractor.MilevaDBDatabases, e.extractor.MilevaDBBlocks)
  1868  		if err != nil {
  1869  			return nil, err
  1870  		}
  1871  		if len(rows) > 0 || e.instanceIdx >= e.instanceCount {
  1872  			return rows, nil
  1873  		}
  1874  	}
  1875  }
  1876  
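// tiflashInstanceInfo records the identifier and status URL of one reachable TiFlash instance.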
  1877  type tiflashInstanceInfo struct {
  1878  	id  string
  1879  	url string
  1880  }
  1881  
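// initialize discovers TiFlash instances by listing the "/tiflash/cluster/http_port/" prefix in
// etcd, probes each advertised address over HTTP, and keeps only the reachable ones (optionally
// filtered by the tiflashInstances set).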
  1882  func (e *TiFlashSystemBlockRetriever) initialize(sctx stochastikctx.Context, tiflashInstances set.StringSet) error {
  1883  	causetstore := sctx.GetStore()
  1884  	if etcd, ok := causetstore.(einsteindb.EtcdBackend); ok {
  1885  		var addrs []string
  1886  		var err error
  1887  		if addrs, err = etcd.EtcdAddrs(); err != nil {
  1888  			return err
  1889  		}
  1890  		if addrs != nil {
  1891  			petriFromCtx := petri.GetPetri(sctx)
  1892  			if petriFromCtx != nil {
  1893  				cli := petriFromCtx.GetEtcdClient()
  1894  				prefix := "/tiflash/cluster/http_port/"
  1895  				ekv := clientv3.NewKV(cli)
  1896  				ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1897  				resp, err := ekv.Get(ctx, prefix, clientv3.WithPrefix())
  1898  				cancel()
  1899  				if err != nil {
  1900  					return errors.Trace(err)
  1901  				}
  1902  				for _, ev := range resp.Kvs {
  1903  					id := string(ev.Key)[len(prefix):]
  1904  					if len(tiflashInstances) > 0 && !tiflashInstances.Exist(id) {
  1905  						continue
  1906  					}
  1907  					url := fmt.Sprintf("%s://%s", soliton.InternalHTTPSchema(), ev.Value)
  1908  					req, err := http.NewRequest(http.MethodGet, url, nil)
  1909  					if err != nil {
  1910  						return errors.Trace(err)
  1911  					}
  1912  					_, err = soliton.InternalHTTPClient().Do(req)
  1913  					if err != nil {
  1914  						sctx.GetStochastikVars().StmtCtx.AppendWarning(err)
  1915  						continue
  1916  					}
  1917  					e.instanceInfos = append(e.instanceInfos, tiflashInstanceInfo{
  1918  						id:  id,
  1919  						url: url,
  1920  					})
  1921  					e.instanceCount += 1
  1922  				}
  1923  				e.initialized = true
  1924  				return nil
  1925  			}
  1926  		}
  1927  		return errors.Errorf("Etcd addrs not found")
  1928  	}
  1929  	return errors.Errorf("%T not an etcd backend", causetstore)
  1930  }
  1931  
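// dataForTiFlashSystemBlocks queries one TiFlash instance's system causet over HTTP and parses the
// tab-separated response into events. The generated statement has the shape
//
//	SELECT <defCausumns> FROM system.<target> [WHERE <filters>] LIMIT <offset>, 1024
//
// where <target> is the causet name with "TIFLASH" replaced by "DT" and lower-cased (so a
// hypothetical causet named TIFLASH_TABLES would map to system.dt_tables).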
  1932  func (e *TiFlashSystemBlockRetriever) dataForTiFlashSystemBlocks(ctx stochastikctx.Context, milevadbDatabases string, milevadbBlocks string) ([][]types.Causet, error) {
  1933  	var defCausumnNames []string
  1934  	for _, c := range e.outputDefCauss {
  1935  		if c.Name.O == "TIFLASH_INSTANCE" {
  1936  			continue
  1937  		}
  1938  		defCausumnNames = append(defCausumnNames, c.Name.L)
  1939  	}
  1940  	maxCount := 1024
  1941  	targetBlock := strings.ToLower(strings.Replace(e.causet.Name.O, "TIFLASH", "DT", 1))
  1942  	var filters []string
  1943  	if len(milevadbDatabases) > 0 {
  1944  		filters = append(filters, fmt.Sprintf("milevadb_database IN (%s)", strings.ReplaceAll(milevadbDatabases, "\"", "'")))
  1945  	}
  1946  	if len(milevadbBlocks) > 0 {
  1947  		filters = append(filters, fmt.Sprintf("milevadb_block IN (%s)", strings.ReplaceAll(milevadbBlocks, "\"", "'")))
  1948  	}
  1949  	query := fmt.Sprintf("SELECT %s FROM system.%s", strings.Join(defCausumnNames, ","), targetBlock) // named "query" so it does not shadow the allegrosql package used below
  1950  	if len(filters) > 0 {
  1951  		query = fmt.Sprintf("%s WHERE %s", query, strings.Join(filters, " AND "))
  1952  	}
  1953  	query = fmt.Sprintf("%s LIMIT %d, %d", query, e.rowIdx, maxCount)
  1954  	notNumber := "nan"
  1955  	instanceInfo := e.instanceInfos[e.instanceIdx]
  1956  	url := instanceInfo.url
  1957  	req, err := http.NewRequest(http.MethodGet, url, nil)
  1958  	if err != nil {
  1959  		return nil, errors.Trace(err)
  1960  	}
  1961  	q := req.URL.Query()
  1962  	q.Add("query", query)
  1963  	req.URL.RawQuery = q.Encode()
  1964  	resp, err := soliton.InternalHTTPClient().Do(req)
  1965  	if err != nil {
  1966  		return nil, errors.Trace(err)
  1967  	}
  1968  	body, err := ioutil.ReadAll(resp.Body)
  1969  	terror.Log(resp.Body.Close())
  1970  	if err != nil {
  1971  		return nil, errors.Trace(err)
  1972  	}
  1973  	records := strings.Split(string(body), "\n")
  1974  	var rows [][]types.Causet
  1975  	for _, record := range records {
  1976  		if len(record) == 0 {
  1977  			continue
  1978  		}
  1979  		fields := strings.Split(record, "\t")
  1980  		if len(fields) < len(e.outputDefCauss)-1 {
  1981  			return nil, errors.Errorf("Record from tiflash doesn't match schemaReplicant %v", fields)
  1982  		}
  1983  		event := make([]types.Causet, len(e.outputDefCauss))
  1984  		for index, defCausumn := range e.outputDefCauss {
  1985  			if defCausumn.Name.O == "TIFLASH_INSTANCE" {
  1986  				continue
  1987  			}
  1988  			if defCausumn.Tp == allegrosql.TypeVarchar {
  1989  				event[index].SetString(fields[index], allegrosql.DefaultDefCauslationName)
  1990  			} else if defCausumn.Tp == allegrosql.TypeLonglong {
  1991  				if fields[index] == notNumber {
  1992  					continue
  1993  				}
  1994  				value, err := strconv.ParseInt(fields[index], 10, 64)
  1995  				if err != nil {
  1996  					return nil, errors.Trace(err)
  1997  				}
  1998  				event[index].SetInt64(value)
  1999  			} else if defCausumn.Tp == allegrosql.TypeDouble {
  2000  				if fields[index] == notNumber {
  2001  					continue
  2002  				}
  2003  				value, err := strconv.ParseFloat(fields[index], 64)
  2004  				if err != nil {
  2005  					return nil, errors.Trace(err)
  2006  				}
  2007  				event[index].SetFloat64(value)
  2008  			} else {
  2009  				return nil, errors.Errorf("Meet defCausumn of unknown type %v", defCausumn)
  2010  			}
  2011  		}
  2012  		event[len(e.outputDefCauss)-1].SetString(instanceInfo.id, allegrosql.DefaultDefCauslationName)
  2013  		rows = append(rows, event)
  2014  	}
  2015  	e.rowIdx += len(rows)
  2016  	if len(rows) < maxCount {
  2017  		e.instanceIdx += 1
  2018  		e.rowIdx = 0
  2019  	}
  2020  	return rows, nil
  2021  }