github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/soliton/stmtsummary/statement_summary.go

     1  // Copyright 2020 WHTCORPS INC, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package stmtsummary
    15  
    16  import (
    17  	"bytes"
    18  	"container/list"
    19  	"fmt"
    20  	"sort"
    21  	"strings"
    22  	"sync"
    23  	"sync/atomic"
    24  	"time"
    25  
    26  	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
    27  	"github.com/whtcorpsinc/BerolinaSQL/auth"
    28  	"github.com/whtcorpsinc/milevadb/soliton/ekvcache"
    29  	"github.com/whtcorpsinc/milevadb/soliton/execdetails"
    30  	"github.com/whtcorpsinc/milevadb/soliton/logutil"
    31  	"github.com/whtcorpsinc/milevadb/soliton/plancodec"
    32  	"github.com/whtcorpsinc/milevadb/soliton/replog"
    33  	"github.com/whtcorpsinc/milevadb/stochastikctx/stmtctx"
    34  	"github.com/whtcorpsinc/milevadb/types"
    35  	"go.uber.org/zap"
    36  )
    37  
    38  // stmtSummaryByDigestKey defines key for stmtSummaryByDigestMap.summaryMap.
    39  type stmtSummaryByDigestKey struct {
    40  	// The same memex may appear in different schemaReplicants, but then it refers to different blocks.
    41  	schemaName string
    42  	digest     string
    43  	// The digest of the previous memex.
    44  	prevDigest string
    45  	// The digest of the plan of this ALLEGROSQL.
    46  	planDigest string
    47  	// `hash` is the hash value of this object.
    48  	hash []byte
    49  }
    50  
    51  // Hash implements SimpleLRUCache.Key.
    52  // Only when the current ALLEGROSQL is `commit` do we record `prevALLEGROSQL`; otherwise, `prevALLEGROSQL` is empty.
    53  // `prevALLEGROSQL` is included in the key to distinguish different transactions.
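        //
        // A minimal sketch (hypothetical values, for illustration only) of how the composite key
        // separates otherwise identical digests:
        //
        //	k1 := &stmtSummaryByDigestKey{schemaName: "test", digest: "d1"}
        //	k2 := &stmtSummaryByDigestKey{schemaName: "mysql", digest: "d1"}
        //	same := bytes.Equal(k1.Hash(), k2.Hash()) // same == false
        //
        // Because schemaName is part of the hashed bytes, the same normalized ALLEGROSQL run against
        // different schemas gets its own entry in the LRU cache.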
    54  func (key *stmtSummaryByDigestKey) Hash() []byte {
    55  	if len(key.hash) == 0 {
    56  		key.hash = make([]byte, 0, len(key.schemaName)+len(key.digest)+len(key.prevDigest)+len(key.planDigest))
    57  		key.hash = append(key.hash, replog.Slice(key.digest)...)
    58  		key.hash = append(key.hash, replog.Slice(key.schemaName)...)
    59  		key.hash = append(key.hash, replog.Slice(key.prevDigest)...)
    60  		key.hash = append(key.hash, replog.Slice(key.planDigest)...)
    61  	}
    62  	return key.hash
    63  }
    64  
    65  // stmtSummaryByDigestMap is an LRU cache that stores memex summaries.
    66  type stmtSummaryByDigestMap struct {
    67  	// It's rare to read concurrently, so RWMutex is not needed.
    68  	sync.Mutex
    69  	summaryMap *ekvcache.SimpleLRUCache
    70  	// beginTimeForCurInterval is the begin time for current summary.
    71  	beginTimeForCurInterval int64
    72  
    73  	// sysVars encapsulates system variables needed to control memex summary.
    74  	sysVars *systemVars
    75  }
    76  
    77  // StmtSummaryByDigestMap is a global map containing all memex summaries.
    78  var StmtSummaryByDigestMap = newStmtSummaryByDigestMap()
    79  
    80  // stmtSummaryByDigest is the summary for each type of memex.
    81  type stmtSummaryByDigest struct {
    82  	// It's rare to read concurrently, so RWMutex is not needed.
    83  	// Mutex is only used to lock `history`.
    84  	sync.Mutex
    85  	initialized bool
    86  	// Each element in history is a summary in one interval.
    87  	history *list.List
    88  	// Following fields are common for each summary element.
    89  	// They won't change once this object is created, so locking is not needed.
    90  	schemaName           string
    91  	digest               string
    92  	planDigest           string
    93  	stmtType             string
    94  	normalizedALLEGROSQL string
    95  	blockNames           string
    96  	isInternal           bool
    97  }
    98  
    99  // stmtSummaryByDigestElement is the summary for each type of memex in the current interval.
   100  type stmtSummaryByDigestElement struct {
   101  	sync.Mutex
   102  	// Each summary covers the time range [beginTime, endTime).
   103  	beginTime int64
   104  	endTime   int64
   105  	// basic
   106  	sampleALLEGROSQL string
   107  	prevALLEGROSQL   string
   108  	sampleCauset     string
   109  	indexNames       []string
   110  	execCount        int64
   111  	sumErrors        int
   112  	sumWarnings      int
   113  	// latency
   114  	sumLatency        time.Duration
   115  	maxLatency        time.Duration
   116  	minLatency        time.Duration
   117  	sumParseLatency   time.Duration
   118  	maxParseLatency   time.Duration
   119  	sumCompileLatency time.Duration
   120  	maxCompileLatency time.Duration
   121  	// interlock
   122  	sumNumCausetTasks    int64
   123  	maxCopProcessTime    time.Duration
   124  	maxCopProcessAddress string
   125  	maxCopWaitTime       time.Duration
   126  	maxCopWaitAddress    string
   127  	// EinsteinDB
   128  	sumProcessTime   time.Duration
   129  	maxProcessTime   time.Duration
   130  	sumWaitTime      time.Duration
   131  	maxWaitTime      time.Duration
   132  	sumBackoffTime   time.Duration
   133  	maxBackoffTime   time.Duration
   134  	sumTotalKeys     int64
   135  	maxTotalKeys     int64
   136  	sumProcessedKeys int64
   137  	maxProcessedKeys int64
   138  	// txn
   139  	commitCount            int64
   140  	sumGetCommitTsTime     time.Duration
   141  	maxGetCommitTsTime     time.Duration
   142  	sumPrewriteTime        time.Duration
   143  	maxPrewriteTime        time.Duration
   144  	sumCommitTime          time.Duration
   145  	maxCommitTime          time.Duration
   146  	sumLocalLatchTime      time.Duration
   147  	maxLocalLatchTime      time.Duration
   148  	sumCommitBackoffTime   int64
   149  	maxCommitBackoffTime   int64
   150  	sumResolveLockTime     int64
   151  	maxResolveLockTime     int64
   152  	sumWriteKeys           int64
   153  	maxWriteKeys           int
   154  	sumWriteSize           int64
   155  	maxWriteSize           int
   156  	sumPrewriteRegionNum   int64
   157  	maxPrewriteRegionNum   int32
   158  	sumTxnRetry            int64
   159  	maxTxnRetry            int
   160  	sumInterDircRetryCount int64
   161  	sumInterDircRetryTime  time.Duration
   162  	sumBackoffTimes        int64
   163  	backoffTypes           map[fmt.Stringer]int
   164  	authUsers              map[string]struct{}
   165  	// other
   166  	sumMem          int64
   167  	maxMem          int64
   168  	sumDisk         int64
   169  	maxDisk         int64
   170  	sumAffectedRows uint64
   171  	// The first time this type of ALLEGROSQL executes.
   172  	firstSeen time.Time
   173  	// The last time this type of ALLEGROSQL executes.
   174  	lastSeen time.Time
   175  	// plan cache
   176  	planInCache   bool
   177  	planCacheHits int64
   178  	// pessimistic execution retry information.
   179  	execRetryCount uint
   180  	execRetryTime  time.Duration
   181  }
   182  
   183  // StmtInterDircInfo records execution information of each memex.
   184  type StmtInterDircInfo struct {
   185  	SchemaName           string
   186  	OriginalALLEGROSQL   string
   187  	NormalizedALLEGROSQL string
   188  	Digest               string
   189  	PrevALLEGROSQL       string
   190  	PrevALLEGROSQLDigest string
   191  	CausetGenerator      func() string
   192  	CausetDigest         string
   193  	CausetDigestGen      func() string
   194  	User                 string
   195  	TotalLatency         time.Duration
   196  	ParseLatency         time.Duration
   197  	CompileLatency       time.Duration
   198  	StmtCtx              *stmtctx.StatementContext
   199  	CausetTasks          *stmtctx.CausetTasksDetails
   200  	InterDircDetail      *execdetails.InterDircDetails
   201  	MemMax               int64
   202  	DiskMax              int64
   203  	StartTime            time.Time
   204  	IsInternal           bool
   205  	Succeed              bool
   206  	CausetInCache        bool
   207  	InterDircRetryCount  uint
   208  	InterDircRetryTime   time.Duration
   209  }
   210  
   211  // newStmtSummaryByDigestMap creates an empty stmtSummaryByDigestMap.
   212  func newStmtSummaryByDigestMap() *stmtSummaryByDigestMap {
   213  	sysVars := newSysVars()
   214  	maxStmtCount := uint(sysVars.getVariable(typeMaxStmtCount))
   215  	return &stmtSummaryByDigestMap{
   216  		summaryMap: ekvcache.NewSimpleLRUCache(maxStmtCount, 0, 0),
   217  		sysVars:    sysVars,
   218  	}
   219  }
   220  
   221  // AddStatement adds a memex to StmtSummaryByDigestMap.
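        //
        // A minimal, hypothetical usage sketch (the literal values are illustrative; real callers also
        // populate StmtCtx, CausetTasks, InterDircDetail and CausetGenerator, which are read during summarization):
        //
        //	sei := &StmtInterDircInfo{
        //		SchemaName:           "test",
        //		OriginalALLEGROSQL:   "select * from t where a = 1",
        //		NormalizedALLEGROSQL: "select * from t where a = ?",
        //		Digest:               "example-digest",
        //		TotalLatency:         10 * time.Millisecond,
        //		StartTime:            time.Now(),
        //		Succeed:              true,
        //	}
        //	StmtSummaryByDigestMap.AddStatement(sei)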
   222  func (ssMap *stmtSummaryByDigestMap) AddStatement(sei *StmtInterDircInfo) {
   223  	// All times are counted in seconds.
   224  	now := time.Now().Unix()
   225  
   226  	intervalSeconds := ssMap.refreshInterval()
   227  	historySize := ssMap.historySize()
   228  
   229  	key := &stmtSummaryByDigestKey{
   230  		schemaName: sei.SchemaName,
   231  		digest:     sei.Digest,
   232  		prevDigest: sei.PrevALLEGROSQLDigest,
   233  		planDigest: sei.CausetDigest,
   234  	}
   235  	// Calculate the hash value in advance to reduce the time spent holding the lock.
   236  	key.Hash()
   237  
   238  	// Enclose the lookup in a function to ensure the lock is always released.
   239  	summary, beginTime := func() (*stmtSummaryByDigest, int64) {
   240  		ssMap.Lock()
   241  		defer ssMap.Unlock()
   242  
   243  		// Check again. Statements could be added before disabling the flag and after Clear().
   244  		if !ssMap.Enabled() {
   245  			return nil, 0
   246  		}
   247  		if sei.IsInternal && !ssMap.EnabledInternal() {
   248  			return nil, 0
   249  		}
   250  
   251  		if ssMap.beginTimeForCurInterval+intervalSeconds <= now {
   252  			// `beginTimeForCurInterval` is a multiple of intervalSeconds, so that when the interval is a multiple
   253  			// of 60 (or 600, 1800, 3600, etc), begin time shows 'XX:XX:00', not 'XX:XX:01'~'XX:XX:59'.
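        			// For example (illustrative numbers): with intervalSeconds = 1800 and now = 10000,
        			// beginTimeForCurInterval becomes 10000/1800*1800 = 9000.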
   254  			ssMap.beginTimeForCurInterval = now / intervalSeconds * intervalSeconds
   255  		}
   256  
   257  		beginTime := ssMap.beginTimeForCurInterval
   258  		value, ok := ssMap.summaryMap.Get(key)
   259  		var summary *stmtSummaryByDigest
   260  		if !ok {
   261  			// Lazy initialize it to release ssMap.mutex ASAP.
   262  			summary = new(stmtSummaryByDigest)
   263  			ssMap.summaryMap.Put(key, summary)
   264  		} else {
   265  			summary = value.(*stmtSummaryByDigest)
   266  		}
   267  		summary.isInternal = summary.isInternal && sei.IsInternal
   268  		return summary, beginTime
   269  	}()
   270  
   271  	// Lock a single entry, not the whole cache.
   272  	if summary != nil {
   273  		summary.add(sei, beginTime, intervalSeconds, historySize)
   274  	}
   275  }
   276  
   277  // Clear removes all memex summaries.
   278  func (ssMap *stmtSummaryByDigestMap) Clear() {
   279  	ssMap.Lock()
   280  	defer ssMap.Unlock()
   281  
   282  	ssMap.summaryMap.DeleteAll()
   283  	ssMap.beginTimeForCurInterval = 0
   284  }
   285  
   286  // clearInternal removes all memex summaries which are internal summaries.
   287  func (ssMap *stmtSummaryByDigestMap) clearInternal() {
   288  	ssMap.Lock()
   289  	defer ssMap.Unlock()
   290  
   291  	for _, key := range ssMap.summaryMap.Keys() {
   292  		summary, ok := ssMap.summaryMap.Get(key)
   293  		if !ok {
   294  			continue
   295  		}
   296  		if summary.(*stmtSummaryByDigest).isInternal {
   297  			ssMap.summaryMap.Delete(key)
   298  		}
   299  	}
   300  }
   301  
   302  // ToCurrentCauset converts current memex summaries to causet.
   303  func (ssMap *stmtSummaryByDigestMap) ToCurrentCauset(user *auth.UserIdentity, isSuper bool) [][]types.Causet {
   304  	ssMap.Lock()
   305  	values := ssMap.summaryMap.Values()
   306  	beginTime := ssMap.beginTimeForCurInterval
   307  	ssMap.Unlock()
   308  
   309  	rows := make([][]types.Causet, 0, len(values))
   310  	for _, value := range values {
   311  		record := value.(*stmtSummaryByDigest).toCurrentCauset(beginTime, user, isSuper)
   312  		if record != nil {
   313  			rows = append(rows, record)
   314  		}
   315  	}
   316  	return rows
   317  }
   318  
   319  // ToHistoryCauset converts history memex summaries to causet.
   320  func (ssMap *stmtSummaryByDigestMap) ToHistoryCauset(user *auth.UserIdentity, isSuper bool) [][]types.Causet {
   321  	ssMap.Lock()
   322  	values := ssMap.summaryMap.Values()
   323  	ssMap.Unlock()
   324  
   325  	historySize := ssMap.historySize()
   326  	rows := make([][]types.Causet, 0, len(values)*historySize)
   327  	for _, value := range values {
   328  		records := value.(*stmtSummaryByDigest).toHistoryCauset(historySize, user, isSuper)
   329  		rows = append(rows, records...)
   330  	}
   331  	return rows
   332  }
   333  
   334  // GetMoreThanOnceSelect gets users' select ALLEGROSQLs that occurred more than once.
   335  func (ssMap *stmtSummaryByDigestMap) GetMoreThanOnceSelect() ([]string, []string) {
   336  	ssMap.Lock()
   337  	values := ssMap.summaryMap.Values()
   338  	ssMap.Unlock()
   339  
   340  	schemas := make([]string, 0, len(values))
   341  	sqls := make([]string, 0, len(values))
   342  	for _, value := range values {
   343  		ssbd := value.(*stmtSummaryByDigest)
   344  		func() {
   345  			ssbd.Lock()
   346  			defer ssbd.Unlock()
   347  			if ssbd.initialized && ssbd.stmtType == "Select" {
   348  				if ssbd.history.Len() > 0 {
   349  					ssElement := ssbd.history.Back().Value.(*stmtSummaryByDigestElement)
   350  					ssElement.Lock()
   351  
   352  					// Empty auth users means that it is an internal query.
   353  					if len(ssElement.authUsers) > 0 && (ssbd.history.Len() > 1 || ssElement.execCount > 1) {
   354  						schemas = append(schemas, ssbd.schemaName)
   355  						sqls = append(sqls, ssElement.sampleALLEGROSQL)
   356  					}
   357  					ssElement.Unlock()
   358  				}
   359  			}
   360  		}()
   361  	}
   362  	return schemas, sqls
   363  }
   364  
   365  // SetEnabled enables or disables memex summary in global(cluster) or stochastik(server) scope.
   366  func (ssMap *stmtSummaryByDigestMap) SetEnabled(value string, inStochastik bool) error {
   367  	if err := ssMap.sysVars.setVariable(typeEnable, value, inStochastik); err != nil {
   368  		return err
   369  	}
   370  
   371  	// Clear all summaries once memex summary is disabled.
   372  	if ssMap.sysVars.getVariable(typeEnable) == 0 {
   373  		ssMap.Clear()
   374  	}
   375  	return nil
   376  }
   377  
   378  // Enabled returns whether memex summary is enabled.
   379  func (ssMap *stmtSummaryByDigestMap) Enabled() bool {
   380  	return ssMap.sysVars.getVariable(typeEnable) > 0
   381  }
   382  
   383  // SetEnabledInternalQuery enables or disables internal memex summary in global(cluster) or stochastik(server) scope.
   384  func (ssMap *stmtSummaryByDigestMap) SetEnabledInternalQuery(value string, inStochastik bool) error {
   385  	if err := ssMap.sysVars.setVariable(typeEnableInternalQuery, value, inStochastik); err != nil {
   386  		return err
   387  	}
   388  
   389  	// Clear all summaries once memex summary is disabled.
   390  	if ssMap.sysVars.getVariable(typeEnableInternalQuery) == 0 {
   391  		ssMap.clearInternal()
   392  	}
   393  	return nil
   394  }
   395  
   396  // EnabledInternal returns whether internal memex summary is enabled.
   397  func (ssMap *stmtSummaryByDigestMap) EnabledInternal() bool {
   398  	return ssMap.sysVars.getVariable(typeEnableInternalQuery) > 0
   399  }
   400  
   401  // SetRefreshInterval sets refreshing interval in ssMap.sysVars.
   402  func (ssMap *stmtSummaryByDigestMap) SetRefreshInterval(value string, inStochastik bool) error {
   403  	return ssMap.sysVars.setVariable(typeRefreshInterval, value, inStochastik)
   404  }
   405  
   406  // refreshInterval gets the refresh interval for summaries.
   407  func (ssMap *stmtSummaryByDigestMap) refreshInterval() int64 {
   408  	return ssMap.sysVars.getVariable(typeRefreshInterval)
   409  }
   410  
   411  // SetHistorySize sets the history size for all summaries.
   412  func (ssMap *stmtSummaryByDigestMap) SetHistorySize(value string, inStochastik bool) error {
   413  	return ssMap.sysVars.setVariable(typeHistorySize, value, inStochastik)
   414  }
   415  
   416  // historySize gets the history size for summaries.
   417  func (ssMap *stmtSummaryByDigestMap) historySize() int {
   418  	return int(ssMap.sysVars.getVariable(typeHistorySize))
   419  }
   420  
   421  // SetMaxStmtCount sets the maximum number of memex summaries to keep.
   422  func (ssMap *stmtSummaryByDigestMap) SetMaxStmtCount(value string, inStochastik bool) error {
   423  	if err := ssMap.sysVars.setVariable(typeMaxStmtCount, value, inStochastik); err != nil {
   424  		return err
   425  	}
   426  	capacity := ssMap.sysVars.getVariable(typeMaxStmtCount)
   427  
   428  	ssMap.Lock()
   429  	defer ssMap.Unlock()
   430  	return ssMap.summaryMap.SetCapacity(uint(capacity))
   431  }
   432  
   433  func (ssMap *stmtSummaryByDigestMap) maxStmtCount() int {
   434  	return int(ssMap.sysVars.getVariable(typeMaxStmtCount))
   435  }
   436  
   437  // SetMaxALLEGROSQLLength sets the maximum length of the sample and normalized ALLEGROSQL kept in summaries.
   438  func (ssMap *stmtSummaryByDigestMap) SetMaxALLEGROSQLLength(value string, inStochastik bool) error {
   439  	return ssMap.sysVars.setVariable(typeMaxALLEGROSQLLength, value, inStochastik)
   440  }
   441  
   442  func (ssMap *stmtSummaryByDigestMap) maxALLEGROSQLLength() int {
   443  	return int(ssMap.sysVars.getVariable(typeMaxALLEGROSQLLength))
   444  }
   445  
   446  // init initializes a stmtSummaryByDigest from a StmtInterDircInfo.
   447  func (ssbd *stmtSummaryByDigest) init(sei *StmtInterDircInfo, beginTime int64, intervalSeconds int64, historySize int) {
   448  	// Use "," to separate block names to support FIND_IN_SET.
   449  	var buffer bytes.Buffer
   450  	for i, value := range sei.StmtCtx.Blocks {
   451  		// In a `create database` memex, the EDB name is not empty but the block name is empty.
   452  		if len(value.Block) == 0 {
   453  			continue
   454  		}
   455  		buffer.WriteString(strings.ToLower(value.EDB))
   456  		buffer.WriteString(".")
   457  		buffer.WriteString(strings.ToLower(value.Block))
   458  		if i < len(sei.StmtCtx.Blocks)-1 {
   459  			buffer.WriteString(",")
   460  		}
   461  	}
   462  	blockNames := buffer.String()
   463  
   464  	planDigest := sei.CausetDigest
   465  	if sei.CausetDigestGen != nil && len(planDigest) == 0 {
   466  		// It comes here only when the plan is 'Point_Get'.
   467  		planDigest = sei.CausetDigestGen()
   468  	}
   469  	ssbd.schemaName = sei.SchemaName
   470  	ssbd.digest = sei.Digest
   471  	ssbd.planDigest = planDigest
   472  	ssbd.stmtType = sei.StmtCtx.StmtType
   473  	ssbd.normalizedALLEGROSQL = formatALLEGROSQL(sei.NormalizedALLEGROSQL)
   474  	ssbd.blockNames = blockNames
   475  	ssbd.history = list.New()
   476  	ssbd.initialized = true
   477  }
   478  
   479  func (ssbd *stmtSummaryByDigest) add(sei *StmtInterDircInfo, beginTime int64, intervalSeconds int64, historySize int) {
   480  	// Enclose this operation in a function to ensure the lock is always released.
   481  	ssElement, isElementNew := func() (*stmtSummaryByDigestElement, bool) {
   482  		ssbd.Lock()
   483  		defer ssbd.Unlock()
   484  
   485  		if !ssbd.initialized {
   486  			ssbd.init(sei, beginTime, intervalSeconds, historySize)
   487  		}
   488  
   489  		var ssElement *stmtSummaryByDigestElement
   490  		isElementNew := true
   491  		if ssbd.history.Len() > 0 {
   492  			lastElement := ssbd.history.Back().Value.(*stmtSummaryByDigestElement)
   493  			if lastElement.beginTime >= beginTime {
   494  				ssElement = lastElement
   495  				isElementNew = false
   496  			} else {
   497  				// The last element expires to the history.
   498  				lastElement.onExpire(intervalSeconds)
   499  			}
   500  		}
   501  		if isElementNew {
   502  			// If the element is newly created, `ssElement.add(sei)` should be done while holding the lock on `ssbd`.
   503  			ssElement = newStmtSummaryByDigestElement(sei, beginTime, intervalSeconds)
   504  			ssbd.history.PushBack(ssElement)
   505  		}
   506  
   507  		// `historySize` might be modified anytime, so check expiration every time.
   508  		// Even if history is set to 0, current summary is still needed.
   509  		for ssbd.history.Len() > historySize && ssbd.history.Len() > 1 {
   510  			ssbd.history.Remove(ssbd.history.Front())
   511  		}
   512  
   513  		return ssElement, isElementNew
   514  	}()
   515  
   516  	// Lock a single entry, not the whole `ssbd`.
   517  	if !isElementNew {
   518  		ssElement.add(sei, intervalSeconds)
   519  	}
   520  }
   521  
   522  func (ssbd *stmtSummaryByDigest) toCurrentCauset(beginTimeForCurInterval int64, user *auth.UserIdentity, isSuper bool) []types.Causet {
   523  	var ssElement *stmtSummaryByDigestElement
   524  
   525  	ssbd.Lock()
   526  	if ssbd.initialized && ssbd.history.Len() > 0 {
   527  		ssElement = ssbd.history.Back().Value.(*stmtSummaryByDigestElement)
   528  	}
   529  	ssbd.Unlock()
   530  
   531  	// `ssElement` is lazily expired, so expired elements could also be read.
   532  	// `beginTime` won't change after `ssElement` is created, so locking is not needed here.
   533  	isAuthed := true
   534  	if ssElement != nil && user != nil && !isSuper {
   535  		_, isAuthed = ssElement.authUsers[user.Username]
   536  	}
   537  	if ssElement == nil || ssElement.beginTime < beginTimeForCurInterval || !isAuthed {
   538  		return nil
   539  	}
   540  	return ssElement.toCauset(ssbd)
   541  }
   542  
   543  func (ssbd *stmtSummaryByDigest) toHistoryCauset(historySize int, user *auth.UserIdentity, isSuper bool) [][]types.Causet {
   544  	// Collect all history summaries into an array.
   545  	ssElements := ssbd.defCauslectHistorySummaries(historySize)
   546  
   547  	rows := make([][]types.Causet, 0, len(ssElements))
   548  	for _, ssElement := range ssElements {
   549  		isAuthed := true
   550  		if user != nil && !isSuper {
   551  			_, isAuthed = ssElement.authUsers[user.Username]
   552  		}
   553  		if isAuthed {
   554  			rows = append(rows, ssElement.toCauset(ssbd))
   555  		}
   556  	}
   557  	return rows
   558  }
   559  
   560  // defCauslectHistorySummaries puts at most `historySize` summaries into an array.
   561  func (ssbd *stmtSummaryByDigest) defCauslectHistorySummaries(historySize int) []*stmtSummaryByDigestElement {
   562  	ssbd.Lock()
   563  	defer ssbd.Unlock()
   564  
   565  	if !ssbd.initialized {
   566  		return nil
   567  	}
   568  	ssElements := make([]*stmtSummaryByDigestElement, 0, ssbd.history.Len())
   569  	for listElement := ssbd.history.Front(); listElement != nil && len(ssElements) < historySize; listElement = listElement.Next() {
   570  		ssElement := listElement.Value.(*stmtSummaryByDigestElement)
   571  		ssElements = append(ssElements, ssElement)
   572  	}
   573  	return ssElements
   574  }
   575  
   576  func newStmtSummaryByDigestElement(sei *StmtInterDircInfo, beginTime int64, intervalSeconds int64) *stmtSummaryByDigestElement {
   577  	// sampleALLEGROSQL / authUsers(sampleUser) / sampleCauset / prevALLEGROSQL / indexNames store the values seen at the first execution,
   578  	// because refreshing them on every execution would hurt performance.
   579  	ssElement := &stmtSummaryByDigestElement{
   580  		beginTime:        beginTime,
   581  		sampleALLEGROSQL: formatALLEGROSQL(sei.OriginalALLEGROSQL),
   582  		// PrevALLEGROSQL is already truncated to cfg.Log.QueryLogMaxLen.
   583  		prevALLEGROSQL: sei.PrevALLEGROSQL,
   584  		// sampleCauset needs to be decoded so it can't be truncated.
   585  		sampleCauset:  sei.CausetGenerator(),
   586  		indexNames:    sei.StmtCtx.IndexNames,
   587  		minLatency:    sei.TotalLatency,
   588  		firstSeen:     sei.StartTime,
   589  		lastSeen:      sei.StartTime,
   590  		backoffTypes:  make(map[fmt.Stringer]int),
   591  		authUsers:     make(map[string]struct{}),
   592  		planInCache:   false,
   593  		planCacheHits: 0,
   594  	}
   595  	ssElement.add(sei, intervalSeconds)
   596  	return ssElement
   597  }
   598  
   599  // onExpire is called when this element expires to history.
   600  func (ssElement *stmtSummaryByDigestElement) onExpire(intervalSeconds int64) {
   601  	ssElement.Lock()
   602  	defer ssElement.Unlock()
   603  
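        	// For illustration (assumed numbers): if beginTime = 9000 and the interval grows from 1800s to
        	// 3600s, endTime moves from 10800 to 12600; if the interval instead shrinks to 600s and the
        	// current time is already past 9600, endTime is set to time.Now().Unix().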
   604  	// refreshInterval may change anytime, so we need to update endTime.
   605  	if ssElement.beginTime+intervalSeconds > ssElement.endTime {
   606  		// If the interval changes to a bigger value, update endTime to beginTime + interval.
   607  		ssElement.endTime = ssElement.beginTime + intervalSeconds
   608  	} else if ssElement.beginTime+intervalSeconds < ssElement.endTime {
   609  		now := time.Now().Unix()
   610  		// If the interval changes to a smaller value and now > beginTime + interval, update endTime to the current time.
   611  		if now > ssElement.beginTime+intervalSeconds {
   612  			ssElement.endTime = now
   613  		}
   614  	}
   615  }
   616  
   617  func (ssElement *stmtSummaryByDigestElement) add(sei *StmtInterDircInfo, intervalSeconds int64) {
   618  	ssElement.Lock()
   619  	defer ssElement.Unlock()
   620  
   621  	// add user to auth users set
   622  	if len(sei.User) > 0 {
   623  		ssElement.authUsers[sei.User] = struct{}{}
   624  	}
   625  
   626  	// refreshInterval may change anytime, so update endTime ASAP.
   627  	ssElement.endTime = ssElement.beginTime + intervalSeconds
   628  	ssElement.execCount++
   629  	if !sei.Succeed {
   630  		ssElement.sumErrors += 1
   631  	}
   632  	ssElement.sumWarnings += int(sei.StmtCtx.WarningCount())
   633  
   634  	// latency
   635  	ssElement.sumLatency += sei.TotalLatency
   636  	if sei.TotalLatency > ssElement.maxLatency {
   637  		ssElement.maxLatency = sei.TotalLatency
   638  	}
   639  	if sei.TotalLatency < ssElement.minLatency {
   640  		ssElement.minLatency = sei.TotalLatency
   641  	}
   642  	ssElement.sumParseLatency += sei.ParseLatency
   643  	if sei.ParseLatency > ssElement.maxParseLatency {
   644  		ssElement.maxParseLatency = sei.ParseLatency
   645  	}
   646  	ssElement.sumCompileLatency += sei.CompileLatency
   647  	if sei.CompileLatency > ssElement.maxCompileLatency {
   648  		ssElement.maxCompileLatency = sei.CompileLatency
   649  	}
   650  
   651  	// interlock
   652  	numCausetTasks := int64(sei.CausetTasks.NumCausetTasks)
   653  	ssElement.sumNumCausetTasks += numCausetTasks
   654  	if sei.CausetTasks.MaxProcessTime > ssElement.maxCopProcessTime {
   655  		ssElement.maxCopProcessTime = sei.CausetTasks.MaxProcessTime
   656  		ssElement.maxCopProcessAddress = sei.CausetTasks.MaxProcessAddress
   657  	}
   658  	if sei.CausetTasks.MaxWaitTime > ssElement.maxCopWaitTime {
   659  		ssElement.maxCopWaitTime = sei.CausetTasks.MaxWaitTime
   660  		ssElement.maxCopWaitAddress = sei.CausetTasks.MaxWaitAddress
   661  	}
   662  
   663  	// EinsteinDB
   664  	ssElement.sumProcessTime += sei.InterDircDetail.ProcessTime
   665  	if sei.InterDircDetail.ProcessTime > ssElement.maxProcessTime {
   666  		ssElement.maxProcessTime = sei.InterDircDetail.ProcessTime
   667  	}
   668  	ssElement.sumWaitTime += sei.InterDircDetail.WaitTime
   669  	if sei.InterDircDetail.WaitTime > ssElement.maxWaitTime {
   670  		ssElement.maxWaitTime = sei.InterDircDetail.WaitTime
   671  	}
   672  	ssElement.sumBackoffTime += sei.InterDircDetail.BackoffTime
   673  	if sei.InterDircDetail.BackoffTime > ssElement.maxBackoffTime {
   674  		ssElement.maxBackoffTime = sei.InterDircDetail.BackoffTime
   675  	}
   676  	ssElement.sumTotalKeys += sei.InterDircDetail.TotalKeys
   677  	if sei.InterDircDetail.TotalKeys > ssElement.maxTotalKeys {
   678  		ssElement.maxTotalKeys = sei.InterDircDetail.TotalKeys
   679  	}
   680  	ssElement.sumProcessedKeys += sei.InterDircDetail.ProcessedKeys
   681  	if sei.InterDircDetail.ProcessedKeys > ssElement.maxProcessedKeys {
   682  		ssElement.maxProcessedKeys = sei.InterDircDetail.ProcessedKeys
   683  	}
   684  
   685  	// txn
   686  	commitDetails := sei.InterDircDetail.CommitDetail
   687  	if commitDetails != nil {
   688  		ssElement.commitCount++
   689  		ssElement.sumPrewriteTime += commitDetails.PrewriteTime
   690  		if commitDetails.PrewriteTime > ssElement.maxPrewriteTime {
   691  			ssElement.maxPrewriteTime = commitDetails.PrewriteTime
   692  		}
   693  		ssElement.sumCommitTime += commitDetails.CommitTime
   694  		if commitDetails.CommitTime > ssElement.maxCommitTime {
   695  			ssElement.maxCommitTime = commitDetails.CommitTime
   696  		}
   697  		ssElement.sumGetCommitTsTime += commitDetails.GetCommitTsTime
   698  		if commitDetails.GetCommitTsTime > ssElement.maxGetCommitTsTime {
   699  			ssElement.maxGetCommitTsTime = commitDetails.GetCommitTsTime
   700  		}
   701  		commitBackoffTime := atomic.LoadInt64(&commitDetails.CommitBackoffTime)
   702  		ssElement.sumCommitBackoffTime += commitBackoffTime
   703  		if commitBackoffTime > ssElement.maxCommitBackoffTime {
   704  			ssElement.maxCommitBackoffTime = commitBackoffTime
   705  		}
   706  		resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime)
   707  		ssElement.sumResolveLockTime += resolveLockTime
   708  		if resolveLockTime > ssElement.maxResolveLockTime {
   709  			ssElement.maxResolveLockTime = resolveLockTime
   710  		}
   711  		ssElement.sumLocalLatchTime += commitDetails.LocalLatchTime
   712  		if commitDetails.LocalLatchTime > ssElement.maxLocalLatchTime {
   713  			ssElement.maxLocalLatchTime = commitDetails.LocalLatchTime
   714  		}
   715  		ssElement.sumWriteKeys += int64(commitDetails.WriteKeys)
   716  		if commitDetails.WriteKeys > ssElement.maxWriteKeys {
   717  			ssElement.maxWriteKeys = commitDetails.WriteKeys
   718  		}
   719  		ssElement.sumWriteSize += int64(commitDetails.WriteSize)
   720  		if commitDetails.WriteSize > ssElement.maxWriteSize {
   721  			ssElement.maxWriteSize = commitDetails.WriteSize
   722  		}
   723  		prewriteRegionNum := atomic.LoadInt32(&commitDetails.PrewriteRegionNum)
   724  		ssElement.sumPrewriteRegionNum += int64(prewriteRegionNum)
   725  		if prewriteRegionNum > ssElement.maxPrewriteRegionNum {
   726  			ssElement.maxPrewriteRegionNum = prewriteRegionNum
   727  		}
   728  		ssElement.sumTxnRetry += int64(commitDetails.TxnRetry)
   729  		if commitDetails.TxnRetry > ssElement.maxTxnRetry {
   730  			ssElement.maxTxnRetry = commitDetails.TxnRetry
   731  		}
   732  		commitDetails.Mu.Lock()
   733  		ssElement.sumBackoffTimes += int64(len(commitDetails.Mu.BackoffTypes))
   734  		for _, backoffType := range commitDetails.Mu.BackoffTypes {
   735  			ssElement.backoffTypes[backoffType] += 1
   736  		}
   737  		commitDetails.Mu.Unlock()
   738  	}
   739  
   740  	// plan cache
   741  	if sei.CausetInCache {
   742  		ssElement.planInCache = true
   743  		ssElement.planCacheHits += 1
   744  	} else {
   745  		ssElement.planInCache = false
   746  	}
   747  
   748  	// other
   749  	ssElement.sumAffectedRows += sei.StmtCtx.AffectedRows()
   750  	ssElement.sumMem += sei.MemMax
   751  	if sei.MemMax > ssElement.maxMem {
   752  		ssElement.maxMem = sei.MemMax
   753  	}
   754  	ssElement.sumDisk += sei.DiskMax
   755  	if sei.DiskMax > ssElement.maxDisk {
   756  		ssElement.maxDisk = sei.DiskMax
   757  	}
   758  	if sei.StartTime.Before(ssElement.firstSeen) {
   759  		ssElement.firstSeen = sei.StartTime
   760  	}
   761  	if ssElement.lastSeen.Before(sei.StartTime) {
   762  		ssElement.lastSeen = sei.StartTime
   763  	}
   764  	if sei.InterDircRetryCount > 0 {
   765  		ssElement.execRetryCount += sei.InterDircRetryCount
   766  		ssElement.execRetryTime += sei.InterDircRetryTime
   767  	}
   768  }
   769  
   770  func (ssElement *stmtSummaryByDigestElement) toCauset(ssbd *stmtSummaryByDigest) []types.Causet {
   771  	ssElement.Lock()
   772  	defer ssElement.Unlock()
   773  
   774  	plan, err := plancodec.DecodeCauset(ssElement.sampleCauset)
   775  	if err != nil {
   776  		logutil.BgLogger().Error("decode plan in memex summary failed", zap.String("plan", ssElement.sampleCauset), zap.Error(err))
   777  		plan = ""
   778  	}
   779  
   780  	sampleUser := ""
   781  	for key := range ssElement.authUsers {
   782  		sampleUser = key
   783  		break
   784  	}
   785  
   786  	// Actually, there's a small chance that endTime is out of date, but it's hard to keep it up to date all the time.
   787  	return types.MakeCausets(
   788  		types.NewTime(types.FromGoTime(time.Unix(ssElement.beginTime, 0)), allegrosql.TypeTimestamp, 0),
   789  		types.NewTime(types.FromGoTime(time.Unix(ssElement.endTime, 0)), allegrosql.TypeTimestamp, 0),
   790  		ssbd.stmtType,
   791  		ssbd.schemaName,
   792  		ssbd.digest,
   793  		ssbd.normalizedALLEGROSQL,
   794  		convertEmptyToNil(ssbd.blockNames),
   795  		convertEmptyToNil(strings.Join(ssElement.indexNames, ",")),
   796  		convertEmptyToNil(sampleUser),
   797  		ssElement.execCount,
   798  		ssElement.sumErrors,
   799  		ssElement.sumWarnings,
   800  		int64(ssElement.sumLatency),
   801  		int64(ssElement.maxLatency),
   802  		int64(ssElement.minLatency),
   803  		avgInt(int64(ssElement.sumLatency), ssElement.execCount),
   804  		avgInt(int64(ssElement.sumParseLatency), ssElement.execCount),
   805  		int64(ssElement.maxParseLatency),
   806  		avgInt(int64(ssElement.sumCompileLatency), ssElement.execCount),
   807  		int64(ssElement.maxCompileLatency),
   808  		ssElement.sumNumCausetTasks,
   809  		int64(ssElement.maxCopProcessTime),
   810  		convertEmptyToNil(ssElement.maxCopProcessAddress),
   811  		int64(ssElement.maxCopWaitTime),
   812  		convertEmptyToNil(ssElement.maxCopWaitAddress),
   813  		avgInt(int64(ssElement.sumProcessTime), ssElement.execCount),
   814  		int64(ssElement.maxProcessTime),
   815  		avgInt(int64(ssElement.sumWaitTime), ssElement.execCount),
   816  		int64(ssElement.maxWaitTime),
   817  		avgInt(int64(ssElement.sumBackoffTime), ssElement.execCount),
   818  		int64(ssElement.maxBackoffTime),
   819  		avgInt(ssElement.sumTotalKeys, ssElement.execCount),
   820  		ssElement.maxTotalKeys,
   821  		avgInt(ssElement.sumProcessedKeys, ssElement.execCount),
   822  		ssElement.maxProcessedKeys,
   823  		avgInt(int64(ssElement.sumPrewriteTime), ssElement.commitCount),
   824  		int64(ssElement.maxPrewriteTime),
   825  		avgInt(int64(ssElement.sumCommitTime), ssElement.commitCount),
   826  		int64(ssElement.maxCommitTime),
   827  		avgInt(int64(ssElement.sumGetCommitTsTime), ssElement.commitCount),
   828  		int64(ssElement.maxGetCommitTsTime),
   829  		avgInt(ssElement.sumCommitBackoffTime, ssElement.commitCount),
   830  		ssElement.maxCommitBackoffTime,
   831  		avgInt(ssElement.sumResolveLockTime, ssElement.commitCount),
   832  		ssElement.maxResolveLockTime,
   833  		avgInt(int64(ssElement.sumLocalLatchTime), ssElement.commitCount),
   834  		int64(ssElement.maxLocalLatchTime),
   835  		avgFloat(ssElement.sumWriteKeys, ssElement.commitCount),
   836  		ssElement.maxWriteKeys,
   837  		avgFloat(ssElement.sumWriteSize, ssElement.commitCount),
   838  		ssElement.maxWriteSize,
   839  		avgFloat(ssElement.sumPrewriteRegionNum, ssElement.commitCount),
   840  		int(ssElement.maxPrewriteRegionNum),
   841  		avgFloat(ssElement.sumTxnRetry, ssElement.commitCount),
   842  		ssElement.maxTxnRetry,
   843  		int(ssElement.execRetryCount),
   844  		int64(ssElement.execRetryTime),
   845  		ssElement.sumBackoffTimes,
   846  		formatBackoffTypes(ssElement.backoffTypes),
   847  		avgInt(ssElement.sumMem, ssElement.execCount),
   848  		ssElement.maxMem,
   849  		avgInt(ssElement.sumDisk, ssElement.execCount),
   850  		ssElement.maxDisk,
   851  		avgFloat(int64(ssElement.sumAffectedRows), ssElement.execCount),
   852  		types.NewTime(types.FromGoTime(ssElement.firstSeen), allegrosql.TypeTimestamp, 0),
   853  		types.NewTime(types.FromGoTime(ssElement.lastSeen), allegrosql.TypeTimestamp, 0),
   854  		ssElement.planInCache,
   855  		ssElement.planCacheHits,
   856  		ssElement.sampleALLEGROSQL,
   857  		ssElement.prevALLEGROSQL,
   858  		ssbd.planDigest,
   859  		plan,
   860  	)
   861  }
   862  
   863  // formatALLEGROSQL truncates ALLEGROSQL to maxALLEGROSQLLength.
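        //
        // For example, assuming maxALLEGROSQLLength is 10, the 25-character query
        // "select * from t where a=1" is truncated to "select * f(len:25)".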
   864  func formatALLEGROSQL(allegrosql string) string {
   865  	maxALLEGROSQLLength := StmtSummaryByDigestMap.maxALLEGROSQLLength()
   866  	length := len(allegrosql)
   867  	if length > maxALLEGROSQLLength {
   868  		allegrosql = fmt.Sprintf("%.*s(len:%d)", maxALLEGROSQLLength, allegrosql, length)
   869  	}
   870  	return allegrosql
   871  }
   872  
   873  // formatBackoffTypes formats the backoffType map into a string, or nil if the map is empty.
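        //
        // For example (illustrative backoff names), a map such as {txnLock: 3, regionMiss: 1} is rendered
        // in descending count order as "txnLock:3,regionMiss:1"; an empty map yields nil, which shows up
        // as NULL in the summary row.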
   874  func formatBackoffTypes(backoffMap map[fmt.Stringer]int) interface{} {
   875  	type backoffStat struct {
   876  		backoffType fmt.Stringer
   877  		count       int
   878  	}
   879  
   880  	size := len(backoffMap)
   881  	if size == 0 {
   882  		return nil
   883  	}
   884  
   885  	backoffArray := make([]backoffStat, 0, len(backoffMap))
   886  	for backoffType, count := range backoffMap {
   887  		backoffArray = append(backoffArray, backoffStat{backoffType, count})
   888  	}
   889  	sort.Slice(backoffArray, func(i, j int) bool {
   890  		return backoffArray[i].count > backoffArray[j].count
   891  	})
   892  
   893  	var buffer bytes.Buffer
   894  	for index, stat := range backoffArray {
   895  		if _, err := fmt.Fprintf(&buffer, "%v:%d", stat.backoffType, stat.count); err != nil {
   896  			return "FORMAT ERROR"
   897  		}
   898  		if index < len(backoffArray)-1 {
   899  			buffer.WriteString(",")
   900  		}
   901  	}
   902  	return buffer.String()
   903  }
   904  
   905  func avgInt(sum int64, count int64) int64 {
   906  	if count > 0 {
   907  		return sum / count
   908  	}
   909  	return 0
   910  }
   911  
   912  func avgFloat(sum int64, count int64) float64 {
   913  	if count > 0 {
   914  		return float64(sum) / float64(count)
   915  	}
   916  	return 0
   917  }
   918  
   919  func convertEmptyToNil(str string) interface{} {
   920  	if str == "" {
   921  		return nil
   922  	}
   923  	return str
   924  }