github.com/wanlay/gorm-dm8@v1.0.5/dmr/g.go (about)

     1  /*
      2   * Copyright (c) 2000-2018, Dameng Database Co., Ltd. (达梦数据库有限公司).
     3   * All rights reserved.
     4   */
     5  package dmr
     6  
     7  import (
     8  	"fmt"
     9  	"math"
    10  	"os"
    11  	"sort"
    12  	"strconv"
    13  	"strings"
    14  	"sync"
    15  	"sync/atomic"
    16  	"time"
    17  
    18  	"github.com/wanlay/gorm-dm8/dmr/util"
    19  )
    20  
    21  type ExecuteTypeEnum int
    22  
    23  const (
    24  	Execute ExecuteTypeEnum = iota
    25  	ExecuteQuery
    26  	ExecuteUpdate
    27  )
    28  
    29  var idGenerator int64 = 0
    30  
    31  func generateId() string {
    32  	return time.Now().String() + strconv.Itoa(int(atomic.AddInt64(&idGenerator, 1)))
    33  }
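
// Note: the identifier above concatenates time.Now().String() (which contains
// spaces and a monotonic-clock suffix) with an atomically incremented counter;
// uniqueness comes from the counter, the time portion is purely informational.
// NewSqlStat and newConnectionStat below prefix the result with "SQL" / "DS".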
    34  
    35  func getInt64(counter *int64, reset bool) int64 {
    36  	if reset {
    37  		return atomic.SwapInt64(counter, 0)
    38  	}
    39  	return atomic.LoadInt64(counter)
    40  }
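
// getInt64 backs the snapshot-and-reset pattern used by getValue further down:
// with reset=true the current value is taken and the counter is zeroed in one
// atomic step, so increments racing with the read are not lost in between.
// A minimal illustration (not part of the original source):
func exampleSnapshotAndReset() int64 {
	var counter int64
	atomic.AddInt64(&counter, 3)
	// Returns 3 and leaves counter at 0; a second snapshot would return 0.
	return getInt64(&counter, true)
}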
    41  
    42  type SqlStatValue struct {
    43  	id string
    44  
    45  	sql string
    46  
    47  	sqlHash int64
    48  
    49  	dataSource string
    50  
    51  	dataSourceId string
    52  
    53  	executeLastStartTime int64
    54  
    55  	executeBatchSizeTotal int64
    56  
    57  	executeBatchSizeMax int64
    58  
    59  	executeSuccessCount int64
    60  
    61  	executeSpanNanoTotal int64
    62  
    63  	executeSpanNanoMax int64
    64  
    65  	runningCount int64
    66  
    67  	concurrentMax int64
    68  
    69  	resultSetHoldTimeNano int64
    70  
    71  	executeAndResultSetHoldTime int64
    72  
    73  	executeNanoSpanMaxOccurTime int64
    74  
    75  	executeErrorCount int64
    76  
    77  	executeErrorLast error
    78  
    79  	executeErrorLastMessage string
    80  
    81  	executeErrorLastStackTrace string
    82  
    83  	executeErrorLastTime int64
    84  
    85  	updateCount int64
    86  
    87  	updateCountMax int64
    88  
    89  	fetchRowCount int64
    90  
    91  	fetchRowCountMax int64
    92  
    93  	inTransactionCount int64
    94  
    95  	lastSlowParameters string
    96  
    97  	clobOpenCount int64
    98  
    99  	blobOpenCount int64
   100  
   101  	readStringLength int64
   102  
   103  	readBytesLength int64
   104  
   105  	inputStreamOpenCount int64
   106  
   107  	readerOpenCount int64
   108  
   109  	histogram_0_1 int64
   110  
   111  	histogram_1_10 int64
   112  
   113  	histogram_10_100 int64
   114  
   115  	histogram_100_1000 int64
   116  
   117  	histogram_1000_10000 int64
   118  
   119  	histogram_10000_100000 int64
   120  
   121  	histogram_100000_1000000 int64
   122  
   123  	histogram_1000000_more int64
   124  
   125  	executeAndResultHoldTime_0_1 int64
   126  
   127  	executeAndResultHoldTime_1_10 int64
   128  
   129  	executeAndResultHoldTime_10_100 int64
   130  
   131  	executeAndResultHoldTime_100_1000 int64
   132  
   133  	executeAndResultHoldTime_1000_10000 int64
   134  
   135  	executeAndResultHoldTime_10000_100000 int64
   136  
   137  	executeAndResultHoldTime_100000_1000000 int64
   138  
   139  	executeAndResultHoldTime_1000000_more int64
   140  
   141  	fetchRowCount_0_1 int64
   142  
   143  	fetchRowCount_1_10 int64
   144  
   145  	fetchRowCount_10_100 int64
   146  
   147  	fetchRowCount_100_1000 int64
   148  
   149  	fetchRowCount_1000_10000 int64
   150  
   151  	fetchRowCount_10000_more int64
   152  
   153  	updateCount_0_1 int64
   154  
   155  	updateCount_1_10 int64
   156  
   157  	updateCount_10_100 int64
   158  
   159  	updateCount_100_1000 int64
   160  
   161  	updateCount_1000_10000 int64
   162  
   163  	updateCount_10000_more int64
   164  }
   165  
   166  func newSqlStatValue() *SqlStatValue {
   167  	ssv := new(SqlStatValue)
   168  	return ssv
   169  }
   170  
   171  func (ssv *SqlStatValue) getExecuteHistogram() []int64 {
   172  	return []int64{
   173  		ssv.histogram_0_1,
   174  		ssv.histogram_1_10,
   175  		ssv.histogram_10_100,
   176  		ssv.histogram_100_1000,
   177  		ssv.histogram_1000_10000,
   178  		ssv.histogram_10000_100000,
   179  		ssv.histogram_100000_1000000,
   180  		ssv.histogram_1000000_more,
   181  	}
   182  }
   183  
   184  func (ssv *SqlStatValue) getExecuteAndResultHoldHistogram() []int64 {
   185  	return []int64{ssv.executeAndResultHoldTime_0_1,
   186  		ssv.executeAndResultHoldTime_1_10,
   187  		ssv.executeAndResultHoldTime_10_100,
   188  		ssv.executeAndResultHoldTime_100_1000,
   189  		ssv.executeAndResultHoldTime_1000_10000,
   190  		ssv.executeAndResultHoldTime_10000_100000,
   191  		ssv.executeAndResultHoldTime_100000_1000000,
   192  		ssv.executeAndResultHoldTime_1000000_more,
   193  	}
   194  }
   195  
   196  func (ssv *SqlStatValue) getFetchRowHistogram() []int64 {
   197  	return []int64{ssv.fetchRowCount_0_1,
   198  		ssv.fetchRowCount_1_10,
   199  		ssv.fetchRowCount_10_100,
   200  		ssv.fetchRowCount_100_1000,
   201  		ssv.fetchRowCount_1000_10000,
   202  		ssv.fetchRowCount_10000_more,
   203  	}
   204  }
   205  
   206  func (ssv *SqlStatValue) getUpdateHistogram() []int64 {
   207  	return []int64{ssv.updateCount_0_1,
   208  		ssv.updateCount_1_10,
   209  		ssv.updateCount_10_100,
   210  		ssv.updateCount_100_1000,
   211  		ssv.updateCount_1000_10000,
   212  		ssv.updateCount_10000_more,
   213  	}
   214  }
   215  
   216  func (ssv *SqlStatValue) getExecuteCount() int64 {
   217  	return ssv.executeErrorCount + ssv.executeSuccessCount
   218  }
   219  
   220  func (ssv *SqlStatValue) getExecuteMillisMax() int64 {
   221  	return ssv.executeSpanNanoMax / (1000 * 1000)
   222  }
   223  
   224  func (ssv *SqlStatValue) getExecuteMillisTotal() int64 {
   225  	return ssv.executeSpanNanoTotal / (1000 * 1000)
   226  }
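
// The two conversions above follow the convention used throughout this file:
// time spans are accumulated in nanoseconds and only divided down to
// milliseconds when exported, so integer division truncates sub-millisecond
// executions to 0 (e.g. 1,500,000 ns reports as 1 ms, 999,999 ns as 0 ms).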
   227  
   228  func (ssv *SqlStatValue) getHistogramValues() []int64 {
   229  	return []int64{
   230  
   231  		ssv.histogram_0_1,
   232  		ssv.histogram_1_10,
   233  		ssv.histogram_10_100,
   234  		ssv.histogram_100_1000,
   235  		ssv.histogram_1000_10000,
   236  		ssv.histogram_10000_100000,
   237  		ssv.histogram_100000_1000000,
   238  		ssv.histogram_1000000_more,
   239  	}
   240  }
   241  
   242  func (ssv *SqlStatValue) getFetchRowCountHistogramValues() []int64 {
   243  	return []int64{
   244  
   245  		ssv.fetchRowCount_0_1,
   246  		ssv.fetchRowCount_1_10,
   247  		ssv.fetchRowCount_10_100,
   248  		ssv.fetchRowCount_100_1000,
   249  		ssv.fetchRowCount_1000_10000,
   250  		ssv.fetchRowCount_10000_more,
   251  	}
   252  }
   253  
   254  func (ssv *SqlStatValue) getUpdateCountHistogramValues() []int64 {
   255  	return []int64{
   256  
   257  		ssv.updateCount_0_1,
   258  		ssv.updateCount_1_10,
   259  		ssv.updateCount_10_100,
   260  		ssv.updateCount_100_1000,
   261  		ssv.updateCount_1000_10000,
   262  		ssv.updateCount_10000_more,
   263  	}
   264  }
   265  
   266  func (ssv *SqlStatValue) getExecuteAndResultHoldTimeHistogramValues() []int64 {
   267  	return []int64{
   268  
   269  		ssv.executeAndResultHoldTime_0_1,
   270  		ssv.executeAndResultHoldTime_1_10,
   271  		ssv.executeAndResultHoldTime_10_100,
   272  		ssv.executeAndResultHoldTime_100_1000,
   273  		ssv.executeAndResultHoldTime_1000_10000,
   274  		ssv.executeAndResultHoldTime_10000_100000,
   275  		ssv.executeAndResultHoldTime_100000_1000000,
   276  		ssv.executeAndResultHoldTime_1000000_more,
   277  	}
   278  }
   279  
   280  func (ssv *SqlStatValue) getResultSetHoldTimeMilis() int64 {
   281  	return ssv.resultSetHoldTimeNano / (1000 * 1000)
   282  }
   283  
   284  func (ssv *SqlStatValue) getExecuteAndResultSetHoldTimeMilis() int64 {
   285  	return ssv.executeAndResultSetHoldTime / (1000 * 1000)
   286  }
   287  
   288  func (ssv *SqlStatValue) getData() map[string]interface{} {
   289  	m := make(map[string]interface{})
   290  
   291  	m[idConstStr] = ssv.id
   292  	m[dataSourceConstStr] = ssv.dataSource
   293  	m["DataSourceId"] = ssv.dataSourceId
   294  	m[sqlConstStr] = ssv.sql
   295  	m[executeCountConstStr] = ssv.getExecuteCount()
   296  	m[errorCountConstStr] = ssv.executeErrorCount
   297  
   298  	m[totalTimeConstStr] = ssv.getExecuteMillisTotal()
   299  	m["LastTime"] = ssv.executeLastStartTime
   300  	m[maxTimespanConstStr] = ssv.getExecuteMillisMax()
   301  	m["LastError"] = ssv.executeErrorLast
   302  	m[effectedRowCountConstStr] = ssv.updateCount
   303  
   304  	m[fetchRowCountConstStr] = ssv.fetchRowCount
   305  	m["MaxTimespanOccurTime"] = ssv.executeNanoSpanMaxOccurTime
   306  	m["BatchSizeMax"] = ssv.executeBatchSizeMax
   307  	m["BatchSizeTotal"] = ssv.executeBatchSizeTotal
   308  	m[concurrentMaxConstStr] = ssv.concurrentMax
   309  
   310  	m[runningCountConstStr] = ssv.runningCount
   311  
   312  	if ssv.executeErrorLastMessage != "" {
   313  		m["LastErrorMessage"] = ssv.executeErrorLastMessage
   314  		m["LastErrorStackTrace"] = ssv.executeErrorLastStackTrace
   315  		m["LastErrorTime"] = ssv.executeErrorLastTime
   316  	} else {
   317  		m["LastErrorMessage"] = ""
   318  		m["LastErrorClass"] = ""
   319  		m["LastErrorStackTrace"] = ""
   320  		m["LastErrorTime"] = ""
   321  	}
   322  
   323  	m[urlConstStr] = ""
   324  	m[inTransactionCountConstStr] = ssv.inTransactionCount
   325  
   326  	m["Histogram"] = ssv.getHistogramValues()
   327  	m["LastSlowParameters"] = ssv.lastSlowParameters
   328  	m["ResultSetHoldTime"] = ssv.getResultSetHoldTimeMilis()
   329  	m["ExecuteAndResultSetHoldTime"] = ssv.getExecuteAndResultSetHoldTimeMilis()
    330  	m[fetchRowCountHistogramConstStr] = ssv.getFetchRowCountHistogramValues() // histogram under its own key; the scalar FetchRowCount is set above
   331  
   332  	m[effectedRowCountHistogramConstStr] = ssv.getUpdateCountHistogramValues()
   333  	m[executeAndResultHoldTimeHistogramConstStr] = ssv.getExecuteAndResultHoldTimeHistogramValues()
   334  	m["EffectedRowCountMax"] = ssv.updateCountMax
   335  	m["FetchRowCountMax"] = ssv.fetchRowCountMax
   336  	m[clobOpenCountConstStr] = ssv.clobOpenCount
   337  
   338  	m[blobOpenCountConstStr] = ssv.blobOpenCount
   339  	m["ReadStringLength"] = ssv.readStringLength
   340  	m["ReadBytesLength"] = ssv.readBytesLength
   341  	m["InputStreamOpenCount"] = ssv.inputStreamOpenCount
   342  	m["ReaderOpenCount"] = ssv.readerOpenCount
   343  
   344  	m["HASH"] = ssv.sqlHash
   345  
   346  	m[executeHoldTimeHistogramConstStr] = ssv.getExecuteHistogram()
   347  
   348  	return m
   349  }
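
// exampleReadStatRow is an illustrative sketch (not part of the original
// source) of how the map built by getData above is typically consumed: keys
// are the *ConstStr constants declared near the bottom of this file, and the
// counters are stored with dynamic type int64.
func exampleReadStatRow(ssv *SqlStatValue) (string, int64) {
	row := ssv.getData()
	sqlText, _ := row[sqlConstStr].(string)
	executeCount, _ := row[executeCountConstStr].(int64)
	return sqlText, executeCount
}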
   350  
   351  type sqlStat struct {
   352  	Sql string
   353  
   354  	SqlHash int64
   355  
   356  	Id string
   357  
   358  	ExecuteLastStartTime int64
   359  
   360  	ExecuteBatchSizeTotal int64
   361  
   362  	ExecuteBatchSizeMax int64
   363  
   364  	ExecuteSuccessCount int64
   365  
   366  	ExecuteSpanNanoTotal int64
   367  
   368  	ExecuteSpanNanoMax int64
   369  
   370  	RunningCount int64
   371  
   372  	ConcurrentMax int64
   373  
   374  	ResultSetHoldTimeNano int64
   375  
   376  	ExecuteAndResultSetHoldTime int64
   377  
   378  	DataSource string
   379  
   380  	File string
   381  
   382  	ExecuteNanoSpanMaxOccurTime int64
   383  
   384  	ExecuteErrorCount int64
   385  
   386  	ExecuteErrorLast error
   387  
   388  	ExecuteErrorLastTime int64
   389  
   390  	UpdateCount int64
   391  
   392  	UpdateCountMax int64
   393  
   394  	FetchRowCount int64
   395  
   396  	FetchRowCountMax int64
   397  
   398  	InTransactionCount int64
   399  
   400  	LastSlowParameters string
   401  
   402  	Removed int64
   403  
   404  	ClobOpenCount int64
   405  
   406  	BlobOpenCount int64
   407  
   408  	ReadStringLength int64
   409  
   410  	ReadBytesLength int64
   411  
   412  	InputStreamOpenCount int64
   413  
   414  	ReaderOpenCount int64
   415  
   416  	Histogram_0_1 int64
   417  
   418  	Histogram_1_10 int64
   419  
   420  	Histogram_10_100 int64
   421  
   422  	Histogram_100_1000 int64
   423  
   424  	Histogram_1000_10000 int64
   425  
   426  	Histogram_10000_100000 int64
   427  
   428  	Histogram_100000_1000000 int64
   429  
   430  	Histogram_1000000_more int64
   431  
   432  	ExecuteAndResultHoldTime_0_1 int64
   433  
   434  	ExecuteAndResultHoldTime_1_10 int64
   435  
   436  	ExecuteAndResultHoldTime_10_100 int64
   437  
   438  	ExecuteAndResultHoldTime_100_1000 int64
   439  
   440  	ExecuteAndResultHoldTime_1000_10000 int64
   441  
   442  	ExecuteAndResultHoldTime_10000_100000 int64
   443  
   444  	ExecuteAndResultHoldTime_100000_1000000 int64
   445  
   446  	ExecuteAndResultHoldTime_1000000_more int64
   447  
   448  	FetchRowCount_0_1 int64
   449  
   450  	FetchRowCount_1_10 int64
   451  
   452  	FetchRowCount_10_100 int64
   453  
   454  	FetchRowCount_100_1000 int64
   455  
   456  	FetchRowCount_1000_10000 int64
   457  
   458  	FetchRowCount_10000_more int64
   459  
   460  	UpdateCount_0_1 int64
   461  
   462  	UpdateCount_1_10 int64
   463  
   464  	UpdateCount_10_100 int64
   465  
   466  	UpdateCount_100_1000 int64
   467  
   468  	UpdateCount_1000_10000 int64
   469  
   470  	UpdateCount_10000_more int64
   471  
   472  	DataSourceId string
   473  }
   474  
   475  func NewSqlStat(sql string) *sqlStat {
   476  	s := new(sqlStat)
   477  	s.Sql = sql
   478  	s.Id = "SQL" + generateId()
   479  	return s
   480  }
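
// exampleRecordExecution is an illustrative sketch (not part of the original
// source) of how the counters on sqlStat are intended to be driven around a
// single statement execution; every method used here is defined in this file.
func exampleRecordExecution(stat *sqlStat, run func() error) {
	stat.incrementRunningCount()
	defer stat.decrementRunningCount()

	start := time.Now()
	err := run()
	if err != nil {
		stat.error(err)
	} else {
		stat.incrementExecuteSuccessCount()
	}
	// Execute stands in for a plain, non-query execution with no result set.
	stat.addExecuteTimeAndResultHoldTimeHistogramRecord(Execute, false, time.Since(start).Nanoseconds(), "")
}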
   481  
   482  func (s *sqlStat) reset() {
   483  	s.ExecuteLastStartTime = 0
   484  
   485  	s.ExecuteBatchSizeTotal = 0
   486  	s.ExecuteBatchSizeMax = 0
   487  
   488  	s.ExecuteSuccessCount = 0
   489  	s.ExecuteSpanNanoTotal = 0
   490  	s.ExecuteSpanNanoMax = 0
   491  	s.ExecuteNanoSpanMaxOccurTime = 0
   492  	s.ConcurrentMax = 0
   493  
   494  	s.ExecuteErrorCount = 0
   495  	s.ExecuteErrorLast = nil
   496  	s.ExecuteErrorLastTime = 0
   497  
   498  	s.UpdateCount = 0
   499  	s.UpdateCountMax = 0
   500  	s.FetchRowCount = 0
   501  	s.FetchRowCountMax = 0
   502  
   503  	s.Histogram_0_1 = 0
   504  	s.Histogram_1_10 = 0
   505  	s.Histogram_10_100 = 0
   506  	s.Histogram_100_1000 = 0
   507  	s.Histogram_1000_10000 = 0
   508  	s.Histogram_10000_100000 = 0
   509  	s.Histogram_100000_1000000 = 0
   510  	s.Histogram_1000000_more = 0
   511  
   512  	s.LastSlowParameters = ""
   513  	s.InTransactionCount = 0
   514  	s.ResultSetHoldTimeNano = 0
   515  	s.ExecuteAndResultSetHoldTime = 0
   516  
   517  	s.FetchRowCount_0_1 = 0
   518  	s.FetchRowCount_1_10 = 0
   519  	s.FetchRowCount_10_100 = 0
   520  	s.FetchRowCount_100_1000 = 0
   521  	s.FetchRowCount_1000_10000 = 0
   522  	s.FetchRowCount_10000_more = 0
   523  
   524  	s.UpdateCount_0_1 = 0
   525  	s.UpdateCount_1_10 = 0
   526  	s.UpdateCount_10_100 = 0
   527  	s.UpdateCount_100_1000 = 0
   528  	s.UpdateCount_1000_10000 = 0
   529  	s.UpdateCount_10000_more = 0
   530  
   531  	s.ExecuteAndResultHoldTime_0_1 = 0
   532  	s.ExecuteAndResultHoldTime_1_10 = 0
   533  	s.ExecuteAndResultHoldTime_10_100 = 0
   534  	s.ExecuteAndResultHoldTime_100_1000 = 0
   535  	s.ExecuteAndResultHoldTime_1000_10000 = 0
   536  	s.ExecuteAndResultHoldTime_10000_100000 = 0
   537  	s.ExecuteAndResultHoldTime_100000_1000000 = 0
   538  	s.ExecuteAndResultHoldTime_1000000_more = 0
   539  
   540  	s.BlobOpenCount = 0
   541  	s.ClobOpenCount = 0
   542  	s.ReadStringLength = 0
   543  	s.ReadBytesLength = 0
   544  	s.InputStreamOpenCount = 0
   545  	s.ReaderOpenCount = 0
   546  }
   547  
   548  func (s *sqlStat) getValueAndReset() *SqlStatValue {
   549  	return s.getValue(true)
   550  }
   551  
   552  func (s *sqlStat) getValue(reset bool) *SqlStatValue {
   553  	ssv := newSqlStatValue()
   554  	ssv.dataSource = s.DataSource
   555  	ssv.dataSourceId = s.DataSourceId
   556  	ssv.sql = s.Sql
   557  	ssv.sqlHash = s.SqlHash
   558  	ssv.id = s.Id
   559  	ssv.executeLastStartTime = s.ExecuteLastStartTime
   560  	if reset {
   561  		s.ExecuteLastStartTime = 0
   562  	}
   563  
   564  	ssv.executeBatchSizeTotal = getInt64(&s.ExecuteBatchSizeTotal, reset)
   565  	ssv.executeBatchSizeMax = getInt64(&s.ExecuteBatchSizeMax, reset)
   566  	ssv.executeSuccessCount = getInt64(&s.ExecuteSuccessCount, reset)
   567  	ssv.executeSpanNanoTotal = getInt64(&s.ExecuteSpanNanoTotal, reset)
   568  	ssv.executeSpanNanoMax = getInt64(&s.ExecuteSpanNanoMax, reset)
   569  	ssv.executeNanoSpanMaxOccurTime = s.ExecuteNanoSpanMaxOccurTime
   570  	if reset {
   571  		s.ExecuteNanoSpanMaxOccurTime = 0
   572  	}
   573  
   574  	ssv.runningCount = s.RunningCount
   575  	ssv.concurrentMax = getInt64(&s.ConcurrentMax, reset)
   576  	ssv.executeErrorCount = getInt64(&s.ExecuteErrorCount, reset)
   577  	ssv.executeErrorLast = s.ExecuteErrorLast
   578  	if reset {
   579  		s.ExecuteErrorLast = nil
   580  	}
   581  
   582  	ssv.executeErrorLastTime = s.ExecuteErrorLastTime
   583  	if reset {
    584  		s.ExecuteErrorLastTime = 0 // reset the source field, consistent with the other reset branches
   585  	}
   586  
   587  	ssv.updateCount = getInt64(&s.UpdateCount, reset)
   588  	ssv.updateCountMax = getInt64(&s.UpdateCountMax, reset)
   589  	ssv.fetchRowCount = getInt64(&s.FetchRowCount, reset)
   590  	ssv.fetchRowCountMax = getInt64(&s.FetchRowCountMax, reset)
   591  	ssv.histogram_0_1 = getInt64(&s.Histogram_0_1, reset)
   592  	ssv.histogram_1_10 = getInt64(&s.Histogram_1_10, reset)
   593  	ssv.histogram_10_100 = getInt64(&s.Histogram_10_100, reset)
   594  	ssv.histogram_100_1000 = getInt64(&s.Histogram_100_1000, reset)
   595  	ssv.histogram_1000_10000 = getInt64(&s.Histogram_1000_10000, reset)
   596  	ssv.histogram_10000_100000 = getInt64(&s.Histogram_10000_100000, reset)
   597  	ssv.histogram_100000_1000000 = getInt64(&s.Histogram_100000_1000000, reset)
   598  	ssv.histogram_1000000_more = getInt64(&s.Histogram_1000000_more, reset)
   599  	ssv.lastSlowParameters = s.LastSlowParameters
   600  	if reset {
   601  		s.LastSlowParameters = ""
   602  	}
   603  
   604  	ssv.inTransactionCount = getInt64(&s.InTransactionCount, reset)
   605  	ssv.resultSetHoldTimeNano = getInt64(&s.ResultSetHoldTimeNano, reset)
   606  	ssv.executeAndResultSetHoldTime = getInt64(&s.ExecuteAndResultSetHoldTime, reset)
   607  	ssv.fetchRowCount_0_1 = getInt64(&s.FetchRowCount_0_1, reset)
   608  	ssv.fetchRowCount_1_10 = getInt64(&s.FetchRowCount_1_10, reset)
   609  	ssv.fetchRowCount_10_100 = getInt64(&s.FetchRowCount_10_100, reset)
   610  	ssv.fetchRowCount_100_1000 = getInt64(&s.FetchRowCount_100_1000, reset)
   611  	ssv.fetchRowCount_1000_10000 = getInt64(&s.FetchRowCount_1000_10000, reset)
   612  	ssv.fetchRowCount_10000_more = getInt64(&s.FetchRowCount_10000_more, reset)
   613  	ssv.updateCount_0_1 = getInt64(&s.UpdateCount_0_1, reset)
   614  	ssv.updateCount_1_10 = getInt64(&s.UpdateCount_1_10, reset)
   615  	ssv.updateCount_10_100 = getInt64(&s.UpdateCount_10_100, reset)
   616  	ssv.updateCount_100_1000 = getInt64(&s.UpdateCount_100_1000, reset)
   617  	ssv.updateCount_1000_10000 = getInt64(&s.UpdateCount_1000_10000, reset)
   618  	ssv.updateCount_10000_more = getInt64(&s.UpdateCount_10000_more, reset)
   619  	ssv.executeAndResultHoldTime_0_1 = getInt64(&s.ExecuteAndResultHoldTime_0_1, reset)
   620  	ssv.executeAndResultHoldTime_1_10 = getInt64(&s.ExecuteAndResultHoldTime_1_10, reset)
   621  	ssv.executeAndResultHoldTime_10_100 = getInt64(&s.ExecuteAndResultHoldTime_10_100, reset)
   622  	ssv.executeAndResultHoldTime_100_1000 = getInt64(&s.ExecuteAndResultHoldTime_100_1000, reset)
   623  	ssv.executeAndResultHoldTime_1000_10000 = getInt64(&s.ExecuteAndResultHoldTime_1000_10000, reset)
   624  	ssv.executeAndResultHoldTime_10000_100000 = getInt64(&s.ExecuteAndResultHoldTime_10000_100000, reset)
   625  	ssv.executeAndResultHoldTime_100000_1000000 = getInt64(&s.ExecuteAndResultHoldTime_100000_1000000, reset)
   626  	ssv.executeAndResultHoldTime_1000000_more = getInt64(&s.ExecuteAndResultHoldTime_1000000_more, reset)
   627  	ssv.blobOpenCount = getInt64(&s.BlobOpenCount, reset)
   628  	ssv.clobOpenCount = getInt64(&s.ClobOpenCount, reset)
   629  	ssv.readStringLength = getInt64(&s.ReadStringLength, reset)
   630  	ssv.readBytesLength = getInt64(&s.ReadBytesLength, reset)
   631  	ssv.inputStreamOpenCount = getInt64(&s.InputStreamOpenCount, reset)
   632  	ssv.readerOpenCount = getInt64(&s.ReaderOpenCount, reset)
   633  	return ssv
   634  }
   635  
   636  func (s *sqlStat) addUpdateCount(delta int64) {
   637  	if delta > 0 {
   638  		atomic.AddInt64(&s.UpdateCount, delta)
   639  	}
   640  
   641  	for {
   642  		max := atomic.LoadInt64(&s.UpdateCountMax)
   643  		if delta <= max {
   644  			break
   645  		}
   646  		if atomic.CompareAndSwapInt64(&s.UpdateCountMax, max, delta) {
   647  			break
   648  		}
   649  	}
   650  
   651  	if delta < 1 {
   652  		atomic.AddInt64(&s.UpdateCount_0_1, 1)
   653  	} else if delta < 10 {
   654  		atomic.AddInt64(&s.UpdateCount_1_10, 1)
   655  	} else if delta < 100 {
   656  		atomic.AddInt64(&s.UpdateCount_10_100, 1)
   657  	} else if delta < 1000 {
   658  		atomic.AddInt64(&s.UpdateCount_100_1000, 1)
   659  	} else if delta < 10000 {
   660  		atomic.AddInt64(&s.UpdateCount_1000_10000, 1)
   661  	} else {
   662  		atomic.AddInt64(&s.UpdateCount_10000_more, 1)
   663  	}
   664  }
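
// exampleAtomicMax is an illustrative sketch (not part of the original source)
// of the lock-free "CAS max" idiom that addUpdateCount above, as well as
// addFetchRowCount, addExecuteBatchCount, incrementRunningCount and
// addExecuteTime below, repeat inline: retry until the stored value is already
// large enough or the compare-and-swap succeeds.
func exampleAtomicMax(target *int64, candidate int64) {
	for {
		current := atomic.LoadInt64(target)
		if candidate <= current {
			return
		}
		if atomic.CompareAndSwapInt64(target, current, candidate) {
			return
		}
	}
}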
   665  
   666  func (s *sqlStat) incrementClobOpenCount() {
   667  	atomic.AddInt64(&s.ClobOpenCount, 1)
   668  }
   669  
   670  func (s *sqlStat) incrementBlobOpenCount() {
   671  	atomic.AddInt64(&s.BlobOpenCount, 1)
   672  }
   673  
   674  func (s *sqlStat) addStringReadLength(length int64) {
   675  	atomic.AddInt64(&s.ReadStringLength, length)
   676  }
   677  
   678  func (s *sqlStat) addReadBytesLength(length int64) {
   679  	atomic.AddInt64(&s.ReadBytesLength, length)
   680  }
   681  
   682  func (s *sqlStat) addReaderOpenCount(count int64) {
   683  	atomic.AddInt64(&s.ReaderOpenCount, count)
   684  }
   685  
   686  func (s *sqlStat) addInputStreamOpenCount(count int64) {
   687  	atomic.AddInt64(&s.InputStreamOpenCount, count)
   688  }
   689  
   690  func (s *sqlStat) addFetchRowCount(delta int64) {
   691  	atomic.AddInt64(&s.FetchRowCount, delta)
   692  	for {
   693  		max := atomic.LoadInt64(&s.FetchRowCountMax)
   694  		if delta <= max {
   695  			break
   696  		}
   697  		if atomic.CompareAndSwapInt64(&s.FetchRowCountMax, max, delta) {
   698  			break
   699  		}
   700  	}
   701  
   702  	if delta < 1 {
   703  		atomic.AddInt64(&s.FetchRowCount_0_1, 1)
   704  	} else if delta < 10 {
   705  		atomic.AddInt64(&s.FetchRowCount_1_10, 1)
   706  	} else if delta < 100 {
   707  		atomic.AddInt64(&s.FetchRowCount_10_100, 1)
   708  	} else if delta < 1000 {
   709  		atomic.AddInt64(&s.FetchRowCount_100_1000, 1)
   710  	} else if delta < 10000 {
   711  		atomic.AddInt64(&s.FetchRowCount_1000_10000, 1)
   712  	} else {
   713  		atomic.AddInt64(&s.FetchRowCount_10000_more, 1)
   714  	}
   715  
   716  }
   717  
   718  func (s *sqlStat) addExecuteBatchCount(batchSize int64) {
   719  	atomic.AddInt64(&s.ExecuteBatchSizeTotal, batchSize)
   720  
   721  	for {
   722  		current := atomic.LoadInt64(&s.ExecuteBatchSizeMax)
   723  		if current < batchSize {
   724  			if atomic.CompareAndSwapInt64(&s.ExecuteBatchSizeMax, current, batchSize) {
   725  				break
   726  			} else {
   727  				continue
   728  			}
   729  		} else {
   730  			break
   731  		}
   732  	}
   733  }
   734  
   735  func (s *sqlStat) incrementExecuteSuccessCount() {
   736  	atomic.AddInt64(&s.ExecuteSuccessCount, 1)
   737  }
   738  
   739  func (s *sqlStat) incrementRunningCount() {
   740  	val := atomic.AddInt64(&s.RunningCount, 1)
   741  
   742  	for {
   743  		max := atomic.LoadInt64(&s.ConcurrentMax)
   744  		if val > max {
   745  			if atomic.CompareAndSwapInt64(&s.ConcurrentMax, max, val) {
   746  				break
   747  			} else {
   748  				continue
   749  			}
   750  		} else {
   751  			break
   752  		}
   753  	}
   754  }
   755  
   756  func (s *sqlStat) decrementRunningCount() {
   757  	atomic.AddInt64(&s.RunningCount, -1)
   758  }
   759  
   760  func (s *sqlStat) addExecuteTimeAndResultHoldTimeHistogramRecord(executeType ExecuteTypeEnum, firstResultSet bool, nanoSpan int64, parameters string) {
   761  	s.addExecuteTime(nanoSpan, parameters)
   762  
   763  	if ExecuteQuery != executeType && !firstResultSet {
   764  		s.executeAndResultHoldTimeHistogramRecord(nanoSpan)
   765  	}
   766  }
   767  
   768  func (s *sqlStat) executeAndResultHoldTimeHistogramRecord(nanoSpan int64) {
   769  	millis := nanoSpan / 1000 / 1000
   770  
   771  	if millis < 1 {
   772  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_0_1, 1)
   773  	} else if millis < 10 {
   774  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_1_10, 1)
   775  	} else if millis < 100 {
   776  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_10_100, 1)
   777  	} else if millis < 1000 {
   778  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_100_1000, 1)
   779  	} else if millis < 10000 {
   780  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_1000_10000, 1)
   781  	} else if millis < 100000 {
   782  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_10000_100000, 1)
   783  	} else if millis < 1000000 {
   784  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_100000_1000000, 1)
   785  	} else {
   786  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_1000000_more, 1)
   787  	}
   788  }
   789  
   790  func (s *sqlStat) histogramRecord(nanoSpan int64) {
   791  	millis := nanoSpan / 1000 / 1000
   792  
   793  	if millis < 1 {
   794  		atomic.AddInt64(&s.Histogram_0_1, 1)
   795  	} else if millis < 10 {
   796  		atomic.AddInt64(&s.Histogram_1_10, 1)
   797  	} else if millis < 100 {
   798  		atomic.AddInt64(&s.Histogram_10_100, 1)
   799  	} else if millis < 1000 {
   800  		atomic.AddInt64(&s.Histogram_100_1000, 1)
   801  	} else if millis < 10000 {
   802  		atomic.AddInt64(&s.Histogram_1000_10000, 1)
   803  	} else if millis < 100000 {
   804  		atomic.AddInt64(&s.Histogram_10000_100000, 1)
   805  	} else if millis < 1000000 {
   806  		atomic.AddInt64(&s.Histogram_100000_1000000, 1)
   807  	} else {
   808  		atomic.AddInt64(&s.Histogram_1000000_more, 1)
   809  	}
   810  }
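
// Bucket layout shared by histogramRecord and
// executeAndResultHoldTimeHistogramRecord above, in milliseconds with the
// upper bound exclusive:
// [0,1) [1,10) [10,100) [100,1000) [1s,10s) [10s,100s) [100s,1000s) [1000s,+).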
   811  
   812  func (s *sqlStat) addExecuteTime(nanoSpan int64, parameters string) {
   813  	atomic.AddInt64(&s.ExecuteSpanNanoTotal, nanoSpan)
   814  
   815  	for {
   816  		current := atomic.LoadInt64(&s.ExecuteSpanNanoMax)
   817  		if current < nanoSpan {
   818  			if atomic.CompareAndSwapInt64(&s.ExecuteSpanNanoMax, current, nanoSpan) {
   819  
   820  				s.ExecuteNanoSpanMaxOccurTime = time.Now().UnixNano()
   821  				s.LastSlowParameters = parameters
   822  
   823  				break
   824  			} else {
   825  				continue
   826  			}
   827  		} else {
   828  			break
   829  		}
   830  	}
   831  
   832  	s.histogramRecord(nanoSpan)
   833  }
   834  
   835  func (s *sqlStat) getExecuteMillisTotal() int64 {
   836  	return s.ExecuteSpanNanoTotal / (1000 * 1000)
   837  }
   838  
   839  func (s *sqlStat) getExecuteMillisMax() int64 {
   840  	return s.ExecuteSpanNanoMax / (1000 * 1000)
   841  }
   842  
   843  func (s *sqlStat) incrementInTransactionCount() {
   844  	atomic.AddInt64(&s.InTransactionCount, 1)
   845  }
   846  
   847  func (s *sqlStat) getExecuteCount() int64 {
   848  	return s.ExecuteErrorCount + s.ExecuteSuccessCount
   849  }
   850  
   851  func (s *sqlStat) getData() map[string]interface{} {
   852  	return s.getValue(false).getData()
   853  }
   854  
   855  func (s *sqlStat) getHistogramValues() []int64 {
   856  	return []int64{
   857  
   858  		s.Histogram_0_1,
   859  		s.Histogram_1_10,
   860  		s.Histogram_10_100,
   861  		s.Histogram_100_1000,
   862  		s.Histogram_1000_10000,
   863  		s.Histogram_10000_100000,
   864  		s.Histogram_100000_1000000,
   865  		s.Histogram_1000000_more,
   866  	}
   867  }
   868  
   869  func (s *sqlStat) getHistogramSum() int64 {
   870  	values := s.getHistogramValues()
   871  	var sum int64 = 0
   872  	for i := 0; i < len(values); i++ {
   873  		sum += values[i]
   874  	}
   875  	return sum
   876  }
   877  
   878  func (s *sqlStat) error(err error) {
   879  	atomic.AddInt64(&s.ExecuteErrorCount, 1)
   880  	s.ExecuteErrorLastTime = time.Now().UnixNano()
   881  	s.ExecuteErrorLast = err
   882  }
   883  
   884  func (s *sqlStat) getResultSetHoldTimeMilis() int64 {
   885  	return s.ResultSetHoldTimeNano / (1000 * 1000)
   886  }
   887  
   888  func (s *sqlStat) getExecuteAndResultSetHoldTimeMilis() int64 {
   889  	return s.ExecuteAndResultSetHoldTime / (1000 * 1000)
   890  }
   891  
   892  func (s *sqlStat) getFetchRowCountHistogramValues() []int64 {
   893  	return []int64{
   894  
   895  		s.FetchRowCount_0_1,
   896  		s.FetchRowCount_1_10,
   897  		s.FetchRowCount_10_100,
   898  		s.FetchRowCount_100_1000,
   899  		s.FetchRowCount_1000_10000,
   900  		s.FetchRowCount_10000_more,
   901  	}
   902  }
   903  
   904  func (s *sqlStat) getUpdateCountHistogramValues() []int64 {
   905  	return []int64{
   906  
   907  		s.UpdateCount_0_1,
   908  		s.UpdateCount_1_10,
   909  		s.UpdateCount_10_100,
   910  		s.UpdateCount_100_1000,
   911  		s.UpdateCount_1000_10000,
   912  		s.UpdateCount_10000_more,
   913  	}
   914  }
   915  
   916  func (s *sqlStat) getExecuteAndResultHoldTimeHistogramValues() []int64 {
   917  	return []int64{
   918  
   919  		s.ExecuteAndResultHoldTime_0_1,
   920  		s.ExecuteAndResultHoldTime_1_10,
   921  		s.ExecuteAndResultHoldTime_10_100,
   922  		s.ExecuteAndResultHoldTime_100_1000,
   923  		s.ExecuteAndResultHoldTime_1000_10000,
   924  		s.ExecuteAndResultHoldTime_10000_100000,
   925  		s.ExecuteAndResultHoldTime_100000_1000000,
   926  		s.ExecuteAndResultHoldTime_1000000_more,
   927  	}
   928  }
   929  
   930  func (s *sqlStat) getExecuteAndResultHoldTimeHistogramSum() int64 {
   931  	values := s.getExecuteAndResultHoldTimeHistogramValues()
   932  	var sum int64 = 0
   933  	for i := 0; i < len(values); i++ {
   934  		sum += values[i]
   935  	}
   936  	return sum
   937  }
   938  
   939  func (s *sqlStat) addResultSetHoldTimeNano(nano int64) {
   940  	atomic.AddInt64(&s.ResultSetHoldTimeNano, nano)
   941  }
   942  
   943  func (s *sqlStat) addResultSetHoldTimeNano2(statementExecuteNano int64, resultHoldTimeNano int64) {
   944  	atomic.AddInt64(&s.ResultSetHoldTimeNano, resultHoldTimeNano)
   945  	atomic.AddInt64(&s.ExecuteAndResultSetHoldTime, statementExecuteNano+resultHoldTimeNano)
    946  	s.executeAndResultHoldTimeHistogramRecord(statementExecuteNano + resultHoldTimeNano) // the callee converts nanoseconds to milliseconds itself
   947  	atomic.AddInt64(&s.UpdateCount_0_1, 1)
   948  }
   949  
   950  type connectionStatValue struct {
   951  	id string
   952  
   953  	url string
   954  
   955  	connCount int64
   956  
   957  	activeConnCount int64
   958  
   959  	maxActiveConnCount int64
   960  
   961  	executeCount int64
   962  
   963  	errorCount int64
   964  
   965  	stmtCount int64
   966  
   967  	activeStmtCount int64
   968  
   969  	maxActiveStmtCount int64
   970  
   971  	commitCount int64
   972  
   973  	rollbackCount int64
   974  
   975  	clobOpenCount int64
   976  
   977  	blobOpenCount int64
   978  
   979  	properties string
   980  }
   981  
   982  func newConnectionStatValue() *connectionStatValue {
   983  	csv := new(connectionStatValue)
   984  	return csv
   985  }
   986  
   987  func (csv *connectionStatValue) getData() map[string]interface{} {
   988  	m := make(map[string]interface{})
   989  	m[idConstStr] = csv.id
   990  	m[urlConstStr] = csv.url
   991  	m[connCountConstStr] = csv.connCount
   992  	m[activeConnCountConstStr] = csv.activeConnCount
   993  	m[maxActiveConnCountConstStr] = csv.maxActiveConnCount
   994  
   995  	m[stmtCountConstStr] = csv.stmtCount
   996  	m[activeStmtCountConstStr] = csv.activeStmtCount
   997  	m[maxActiveStmtCountConstStr] = csv.maxActiveStmtCount
   998  
   999  	m[executeCountConstStr] = csv.executeCount
  1000  	m[errorCountConstStr] = csv.errorCount
  1001  	m[commitCountConstStr] = csv.commitCount
  1002  	m[rollbackCountConstStr] = csv.rollbackCount
  1003  
  1004  	m[clobOpenCountConstStr] = csv.clobOpenCount
  1005  	m[blobOpenCountConstStr] = csv.blobOpenCount
  1006  
  1007  	m[propertiesConstStr] = csv.properties
  1008  	return m
  1009  }
  1010  
  1011  type connectionStat struct {
  1012  	id string
  1013  
  1014  	url string
  1015  
  1016  	connCount int64
  1017  
  1018  	activeConnCount int64
  1019  
  1020  	maxActiveConnCount int64
  1021  
  1022  	executeCount int64
  1023  
  1024  	errorCount int64
  1025  
  1026  	stmtCount int64
  1027  
  1028  	activeStmtCount int64
  1029  
  1030  	maxActiveStmtCount int64
  1031  
  1032  	commitCount int64
  1033  
  1034  	rollbackCount int64
  1035  
  1036  	clobOpenCount int64
  1037  
  1038  	blobOpenCount int64
  1039  
  1040  	sqlStatMap map[string]*sqlStat
  1041  
  1042  	maxSqlSize int
  1043  
  1044  	skipSqlCount int64
  1045  
  1046  	lock sync.RWMutex
  1047  
  1048  	properties string
  1049  }
  1050  
  1051  func newConnectionStat(url string) *connectionStat {
  1052  	cs := new(connectionStat)
  1053  	cs.maxSqlSize = StatSqlMaxCount
  1054  	cs.id = "DS" + generateId()
  1055  	cs.url = url
  1056  	cs.sqlStatMap = make(map[string]*sqlStat, 200)
  1057  	return cs
  1058  }
  1059  
   1060  func (cs *connectionStat) createSqlStat(sql string) *sqlStat {
   1061  	cs.lock.Lock()
   1062  	defer cs.lock.Unlock()
   1063  	stat, ok := cs.sqlStatMap[sql]
   1064  	if !ok {
   1065  		stat = NewSqlStat(sql)
   1066  		stat.DataSource = cs.url
   1067  		stat.DataSourceId = cs.id
   1068  		if cs.putSqlStat(stat) {
   1069  			return stat
   1070  		} else {
   1071  			return nil
   1072  		}
   1073  	}
   1074  
   1075  	return stat
   1076  
   1077  }
  1078  
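// exampleTrackStatement is an illustrative sketch (not part of the original
// source) of how connection-level and SQL-level stats combine: the
// connectionStat hands out (or reuses) a per-SQL sqlStat while the caller
// bumps the connection counters around it.
func exampleTrackStatement(cs *connectionStat, sql string) *sqlStat {
	cs.incrementStmt()
	cs.incrementExecuteCount()
	// May return nil when the per-connection SQL cap is reached and the
	// remove-oldest mode is disabled (see putSqlStat below).
	return cs.createSqlStat(sql)
}
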
  1079  func (cs *connectionStat) putSqlStat(sqlStat *sqlStat) bool {
  1080  	if cs.maxSqlSize > 0 && len(cs.sqlStatMap) == cs.maxSqlSize {
  1081  		if StatSqlRemoveMode == STAT_SQL_REMOVE_OLDEST {
  1082  			removeSqlStat := cs.eliminateSqlStat()
  1083  			if removeSqlStat.RunningCount > 0 || removeSqlStat.getExecuteCount() > 0 {
  1084  				atomic.AddInt64(&cs.skipSqlCount, 1)
  1085  			}
  1086  			cs.sqlStatMap[sqlStat.Sql] = sqlStat
  1087  			return true
  1088  		} else {
  1089  			if sqlStat.RunningCount > 0 || sqlStat.getExecuteCount() > 0 {
  1090  				atomic.AddInt64(&cs.skipSqlCount, 1)
  1091  			}
  1092  			return false
  1093  		}
  1094  	} else {
  1095  		cs.sqlStatMap[sqlStat.Sql] = sqlStat
  1096  		return true
  1097  	}
  1098  }
  1099  
  1100  func (cs *connectionStat) eliminateSqlStat() *sqlStat {
  1101  	if cs.maxSqlSize > 0 && len(cs.sqlStatMap) == cs.maxSqlSize {
  1102  		if StatSqlRemoveMode == STAT_SQL_REMOVE_OLDEST {
  1103  			for s, item := range cs.sqlStatMap {
  1104  				if item != nil {
  1105  					delete(cs.sqlStatMap, s)
  1106  					return item
  1107  				}
  1108  			}
  1109  		}
  1110  	}
  1111  	return nil
  1112  }
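
// Note: Go map iteration order is unspecified, so eliminateSqlStat above evicts
// an arbitrary surviving entry rather than a strictly oldest one; the
// STAT_SQL_REMOVE_OLDEST mode is therefore approximate.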
  1113  
   1114  func (cs *connectionStat) getSqlStatMap() map[string]*sqlStat {
   1115  	cs.lock.RLock()
   1116  	defer cs.lock.RUnlock()
   1117  	m := make(map[string]*sqlStat, len(cs.sqlStatMap))
   1118  	for s, item := range cs.sqlStatMap {
   1119  		m[s] = item
   1120  	}
   1121  	return m
   1122  }
  1123  
  1124  func (cs *connectionStat) getSqlStatMapAndReset() []*SqlStatValue {
  1125  	stats := make([]*sqlStat, 0, len(cs.sqlStatMap))
  1126  	cs.lock.Lock()
  1127  	defer cs.lock.Unlock()
  1128  
  1129  	for s, stat := range cs.sqlStatMap {
  1130  
  1131  		if stat.getExecuteCount() == 0 && stat.RunningCount == 0 {
  1132  			stat.Removed = 1
  1133  			delete(cs.sqlStatMap, s)
  1134  		} else {
  1135  			stats = append(stats, stat)
  1136  		}
  1137  	}
  1138  
  1139  	values := make([]*SqlStatValue, 0, len(stats))
  1140  	for _, stat := range stats {
  1141  		value := stat.getValueAndReset()
  1142  		if value.getExecuteCount() == 0 && value.runningCount == 0 {
  1143  			continue
  1144  		}
  1145  		values = append(values, value)
  1146  	}
  1147  	return values
  1148  }
  1149  
  1150  func (cs *connectionStat) incrementConn() {
  1151  	atomic.AddInt64(&cs.connCount, 1)
  1152  	atomic.AddInt64(&cs.activeConnCount, 1)
  1153  	count := atomic.LoadInt64(&cs.activeConnCount)
  1154  	if count > atomic.LoadInt64(&cs.maxActiveConnCount) {
  1155  		atomic.StoreInt64(&cs.maxActiveConnCount, count)
  1156  	}
  1157  }
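
// Unlike the CAS loops on sqlStat, incrementConn and incrementStmt update their
// maxima with a plain load-compare-store, so the recorded peaks are best-effort
// gauges and a concurrent spike can occasionally be missed.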
  1158  
  1159  func (cs *connectionStat) decrementConn() {
  1160  	atomic.AddInt64(&cs.activeConnCount, -1)
  1161  }
  1162  
  1163  func (cs *connectionStat) incrementStmt() {
  1164  	atomic.AddInt64(&cs.stmtCount, 1)
  1165  	atomic.AddInt64(&cs.activeStmtCount, 1)
  1166  	count := atomic.LoadInt64(&cs.activeStmtCount)
  1167  	if count > atomic.LoadInt64(&cs.maxActiveStmtCount) {
  1168  		atomic.StoreInt64(&cs.maxActiveStmtCount, count)
  1169  	}
  1170  }
  1171  
  1172  func (cs *connectionStat) decrementStmt() {
  1173  	atomic.AddInt64(&cs.activeStmtCount, -1)
  1174  }
  1175  
  1176  func (cs *connectionStat) decrementStmtByActiveStmtCount(activeStmtCount int64) {
  1177  	atomic.AddInt64(&cs.activeStmtCount, -activeStmtCount)
  1178  }
  1179  
  1180  func (cs *connectionStat) incrementExecuteCount() {
  1181  	atomic.AddInt64(&cs.executeCount, 1)
  1182  }
  1183  
  1184  func (cs *connectionStat) incrementErrorCount() {
  1185  	atomic.AddInt64(&cs.errorCount, 1)
  1186  }
  1187  
  1188  func (cs *connectionStat) incrementCommitCount() {
  1189  	atomic.AddInt64(&cs.commitCount, 1)
  1190  }
  1191  
  1192  func (cs *connectionStat) incrementRollbackCount() {
  1193  	atomic.AddInt64(&cs.rollbackCount, 1)
  1194  }
  1195  
  1196  func (cs *connectionStat) getValue(reset bool) *connectionStatValue {
  1197  	val := newConnectionStatValue()
  1198  	val.id = cs.id
  1199  	val.url = cs.url
  1200  
  1201  	val.connCount = getInt64(&cs.connCount, reset)
  1202  	val.activeConnCount = getInt64(&cs.activeConnCount, false)
  1203  	val.maxActiveConnCount = getInt64(&cs.maxActiveConnCount, false)
  1204  
  1205  	val.stmtCount = getInt64(&cs.stmtCount, reset)
  1206  	val.activeStmtCount = getInt64(&cs.activeStmtCount, false)
  1207  	val.maxActiveStmtCount = getInt64(&cs.maxActiveStmtCount, false)
  1208  
  1209  	val.commitCount = getInt64(&cs.commitCount, reset)
  1210  	val.rollbackCount = getInt64(&cs.rollbackCount, reset)
  1211  	val.executeCount = getInt64(&cs.executeCount, reset)
  1212  	val.errorCount = getInt64(&cs.errorCount, reset)
  1213  
  1214  	val.blobOpenCount = getInt64(&cs.blobOpenCount, reset)
  1215  	val.clobOpenCount = getInt64(&cs.clobOpenCount, reset)
  1216  
  1217  	val.properties = cs.properties
  1218  	return val
  1219  }
  1220  
  1221  func (cs *connectionStat) getData() map[string]interface{} {
  1222  	return cs.getValue(false).getData()
  1223  }
  1224  
  1225  func (cs *connectionStat) getValueAndReset() *connectionStatValue {
  1226  	return cs.getValue(true)
  1227  }
  1228  
  1229  type GoStat struct {
  1230  	connStatMap map[string]*connectionStat
  1231  
  1232  	lock sync.RWMutex
  1233  
  1234  	maxConnSize int
  1235  
  1236  	skipConnCount int64
  1237  }
  1238  
  1239  func newGoStat(maxConnSize int) *GoStat {
  1240  	gs := new(GoStat)
  1241  	if maxConnSize > 0 {
  1242  		gs.maxConnSize = maxConnSize
  1243  	} else {
  1244  		gs.maxConnSize = 1000
  1245  	}
  1246  
  1247  	gs.connStatMap = make(map[string]*connectionStat, 16)
  1248  	return gs
  1249  }
  1250  
  1251  func (gs *GoStat) createConnStat(conn *DmConnection) *connectionStat {
  1252  	url := conn.dmConnector.host + ":" + strconv.Itoa(int(conn.dmConnector.port))
  1253  	gs.lock.Lock()
  1254  	defer gs.lock.Unlock()
  1255  	connstat, ok := gs.connStatMap[url]
  1256  	if !ok {
  1257  		connstat = newConnectionStat(url)
  1258  
  1259  		remove := len(gs.connStatMap) > gs.maxConnSize
  1260  		if remove && connstat.activeConnCount > 0 {
  1261  			atomic.AddInt64(&gs.skipConnCount, 1)
  1262  		}
  1263  
  1264  		gs.connStatMap[url] = connstat
  1265  	}
  1266  
  1267  	return connstat
  1268  }
  1269  
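// exampleStatChain is an illustrative sketch (not part of the original source)
// of the ownership chain: a GoStat keyed by "host:port" hands out
// connectionStat values, which in turn hand out per-SQL sqlStat values.
// conn is assumed to be an already established *DmConnection.
func exampleStatChain(gs *GoStat, conn *DmConnection, sql string) *sqlStat {
	connStat := gs.createConnStat(conn)
	connStat.incrementConn()
	return connStat.createSqlStat(sql)
}
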
   1270  func (gs *GoStat) getConnStatMap() map[string]*connectionStat {
   1271  	gs.lock.RLock()
   1272  	defer gs.lock.RUnlock()
   1273  	m := make(map[string]*connectionStat, len(gs.connStatMap))
   1274  
   1275  	for s, stat := range gs.connStatMap {
   1276  		m[s] = stat
   1277  	}
   1278  	return m
   1279  }
  1280  
  1281  var sqlRowField = []string{rowNumConstStr, dataSourceConstStr, sqlConstStr, executeCountConstStr,
  1282  	totalTimeConstStr, maxTimespanConstStr, inTransactionCountConstStr, errorCountConstStr, effectedRowCountConstStr,
  1283  	fetchRowCountConstStr, runningCountConstStr, concurrentMaxConstStr, executeHoldTimeHistogramConstStr,
  1284  	executeAndResultHoldTimeHistogramConstStr, fetchRowCountHistogramConstStr, effectedRowCountHistogramConstStr}
  1285  
  1286  var sqlColField = []string{"ID", "DataSource", "SQL", "ExecuteCount",
  1287  	"ErrorCount", "TotalTime", "LastTime", "MaxTimespan", "LastError", "EffectedRowCount",
  1288  	"FetchRowCount", "MaxTimespanOccurTime", "BatchSizeMax", "BatchSizeTotal", "ConcurrentMax",
  1289  	"RunningCount", "Name", "File", "LastErrorMessage", "LastErrorClass", "LastErrorStackTrace",
  1290  	"LastErrorTime", "DbType", "URL", "InTransactionCount", "Histogram", "LastSlowParameters",
  1291  	"ResultSetHoldTime", "ExecuteAndResultSetHoldTime", "FetchRowCountHistogram",
  1292  	"EffectedRowCountHistogram", "ExecuteAndResultHoldTimeHistogram", "EffectedRowCountMax",
  1293  	"FetchRowCountMax", "ClobOpenCount"}
  1294  
  1295  const (
  1296  	rowNumConstStr                            = "rowNum"
  1297  	idConstStr                                = "ID"
  1298  	urlConstStr                               = "Url"
  1299  	connCountConstStr                         = "ConnCount"
  1300  	activeConnCountConstStr                   = "ActiveConnCount"
  1301  	maxActiveConnCountConstStr                = "MaxActiveConnCount"
  1302  	stmtCountConstStr                         = "StmtCount"
  1303  	activeStmtCountConstStr                   = "ActiveStmtCount"
  1304  	maxActiveStmtCountConstStr                = "MaxActiveStmtCount"
  1305  	executeCountConstStr                      = "ExecuteCount"
  1306  	errorCountConstStr                        = "ErrorCount"
  1307  	commitCountConstStr                       = "CommitCount"
  1308  	rollbackCountConstStr                     = "RollbackCount"
  1309  	clobOpenCountConstStr                     = "ClobOpenCount"
  1310  	blobOpenCountConstStr                     = "BlobOpenCount"
  1311  	propertiesConstStr                        = "Properties"
  1312  	dataSourceConstStr                        = "DataSource"
  1313  	sqlConstStr                               = "SQL"
  1314  	totalTimeConstStr                         = "TotalTime"
  1315  	maxTimespanConstStr                       = "MaxTimespan"
  1316  	inTransactionCountConstStr                = "InTransactionCount"
  1317  	effectedRowCountConstStr                  = "EffectedRowCount"
  1318  	fetchRowCountConstStr                     = "FetchRowCount"
  1319  	runningCountConstStr                      = "RunningCount"
  1320  	concurrentMaxConstStr                     = "ConcurrentMax"
  1321  	executeHoldTimeHistogramConstStr          = "ExecuteHoldTimeHistogram"
  1322  	executeAndResultHoldTimeHistogramConstStr = "ExecuteAndResultHoldTimeHistogram"
  1323  	fetchRowCountHistogramConstStr            = "FetchRowCountHistogram"
  1324  	effectedRowCountHistogramConstStr         = "EffectedRowCountHistogram"
  1325  )
  1326  
  1327  var dsRowField = []string{rowNumConstStr, urlConstStr, activeConnCountConstStr,
  1328  	maxActiveConnCountConstStr, activeStmtCountConstStr, maxActiveStmtCountConstStr, executeCountConstStr, errorCountConstStr,
  1329  	commitCountConstStr, rollbackCountConstStr}
  1330  
  1331  var dsColField = []string{"ID", "ConnCount", "ActiveConnCount",
  1332  	"MaxActiveConnCount", "StmtCount", "ActiveStmtCount", "MaxActiveStmtCount", "ExecuteCount",
  1333  	"ErrorCount", "CommitCount", "RollbackCount", "ClobOpenCount", "BlobOpenCount"}
  1334  
  1335  const (
  1336  	PROP_NAME_SORT            = "sort"
  1337  	PROP_NAME_SORT_FIELD      = "field"
  1338  	PROP_NAME_SORT_TYPE       = "direction"
  1339  	PROP_NAME_SEARCH          = "search"
  1340  	PROP_NAME_PAGE_NUM        = "pageNum"
  1341  	PROP_NAME_PAGE_SIZE       = "pageSize"
  1342  	PROP_NAME_PAGE_COUNT      = "pageCount"
  1343  	PROP_NAME_TOTAL_ROW_COUNT = "totalRowCount"
  1344  	PROP_NAME_FLUSH_FREQ      = "flushFreq"
  1345  	PROP_NAME_DATASOURCE_ID   = "dataSourceId"
  1346  	PROP_NAME_SQL_ID          = "sqlId"
  1347  
  1348  	URL_SQL               = "sql"
  1349  	URL_SQL_DETAIL        = "sqlDetail"
  1350  	URL_DATASOURCE        = "dataSource"
  1351  	URL_DATASOURCE_DETAIL = "dataSourceDetail"
  1352  
  1353  	RESULT_CODE_SUCCESS = 1
  1354  	RESULT_CODE_ERROR   = -1
  1355  	DEFAULT_PAGE_NUM    = 1
  1356  	DEFAULT_PAGE_SIZE   = int(INT32_MAX)
  1357  	DEFAULT_ORDER_TYPE  = "asc"
  1358  	DEFAULT_ORDERBY     = "DataSourceId"
  1359  )
  1360  
  1361  type StatReader struct {
  1362  	connStat []map[string]interface{}
  1363  
  1364  	connStatColLens []int
  1365  
  1366  	highFreqSqlStat []map[string]interface{}
  1367  
  1368  	highFreqSqlStatColLens []int
  1369  
  1370  	slowSqlStat []map[string]interface{}
  1371  
  1372  	slowSqlStatColLens []int
  1373  }
  1374  
  1375  func newStatReader() *StatReader {
  1376  	sr := new(StatReader)
  1377  	return sr
  1378  }
  1379  
  1380  func (sr *StatReader) readConnStat(retList []string, maxCount int) (bool, []string) {
  1381  	fields := dsRowField
  1382  	isAppend := false
  1383  	if sr.connStat == nil {
  1384  		sr.connStat = sr.getConnStat("", fields)
  1385  		sr.connStatColLens = calcColLens(sr.connStat, fields, COL_MAX_LEN)
  1386  		isAppend = false
  1387  	} else {
  1388  		isAppend = true
  1389  	}
  1390  	var retContent []map[string]interface{}
  1391  	if maxCount > 0 && len(sr.connStat) > maxCount {
  1392  		retContent = sr.connStat[0:maxCount]
  1393  		sr.connStat = sr.connStat[maxCount:len(sr.connStat)]
  1394  	} else {
  1395  		retContent = sr.connStat
  1396  		sr.connStat = nil
  1397  	}
  1398  	retList = append(retList, sr.getFormattedOutput(retContent, fields, sr.connStatColLens, isAppend))
  1399  	return sr.connStat != nil, retList
  1400  }
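
// exampleDrainConnStat is an illustrative sketch (not part of the original
// source) of the paging contract shared by readConnStat above and the
// readHighFreqSqlStat / readSlowSqlStat readers below: call repeatedly with a
// chunk size, collecting formatted table fragments, until the reader reports
// that nothing is left.
func exampleDrainConnStat(sr *StatReader, chunkSize int) []string {
	var out []string
	for {
		more, next := sr.readConnStat(out, chunkSize)
		out = next
		if !more {
			return out
		}
	}
}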
  1401  
  1402  func (sr *StatReader) readHighFreqSqlStat(retList []string, maxCount int) (bool, []string) {
  1403  	isAppend := false
  1404  	if sr.highFreqSqlStat == nil {
  1405  		sr.highFreqSqlStat = sr.getHighFreqSqlStat(StatHighFreqSqlCount, -1, sqlRowField)
  1406  		sr.highFreqSqlStatColLens = calcColLens(sr.highFreqSqlStat, sqlRowField, COL_MAX_LEN)
  1407  		isAppend = false
  1408  	} else {
  1409  		isAppend = true
  1410  	}
  1411  	var retContent []map[string]interface{}
  1412  	if maxCount > 0 && len(sr.highFreqSqlStat) > maxCount {
  1413  		retContent = sr.highFreqSqlStat[0:maxCount]
  1414  		sr.highFreqSqlStat = sr.highFreqSqlStat[maxCount:len(sr.highFreqSqlStat)]
  1415  	} else {
  1416  		retContent = sr.highFreqSqlStat
  1417  		sr.highFreqSqlStat = nil
  1418  	}
  1419  	retList = append(retList, sr.getFormattedOutput(retContent, sqlRowField, sr.highFreqSqlStatColLens, isAppend))
  1420  	return sr.highFreqSqlStat != nil, retList
  1421  }
  1422  
  1423  func (sr *StatReader) getHighFreqSqlStat(topCount int, sqlId int,
  1424  	fields []string) []map[string]interface{} {
  1425  	var content []map[string]interface{}
  1426  
  1427  	if topCount != 0 {
  1428  		parameters := NewProperties()
  1429  		parameters.Set(PROP_NAME_SORT_FIELD, "ExecuteCount")
  1430  		parameters.Set(PROP_NAME_SORT_TYPE, "desc")
  1431  		parameters.Set(PROP_NAME_PAGE_NUM, "1")
  1432  		parameters.Set(PROP_NAME_PAGE_SIZE, strconv.Itoa(topCount))
  1433  		content = sr.service(URL_SQL, parameters)
  1434  		if sqlId != -1 {
  1435  			matchedContent := make([]map[string]interface{}, 0)
  1436  			for _, sqlStat := range content {
  1437  				idStr := sqlStat["ID"]
  1438  				if idStr == sqlId {
  1439  					matchedContent = append(matchedContent, sqlStat)
  1440  					break
  1441  				}
  1442  			}
  1443  			content = matchedContent
  1444  		}
  1445  	}
  1446  
  1447  	if content == nil {
  1448  		content = make([]map[string]interface{}, 0)
  1449  	} else {
  1450  		i := 1
  1451  		for _, m := range content {
  1452  			m[rowNumConstStr] = i
  1453  			i++
  1454  		}
  1455  	}
  1456  	content = addTitles(content, fields)
  1457  	return content
  1458  }
  1459  
  1460  func (sr *StatReader) readSlowSqlStat(retList []string, maxCount int) (bool, []string) {
  1461  	isAppend := false
  1462  	if sr.slowSqlStat == nil {
  1463  		sr.slowSqlStat = sr.getSlowSqlStat(StatSlowSqlCount, -1, sqlRowField)
  1464  		sr.slowSqlStatColLens = calcColLens(sr.slowSqlStat, sqlRowField,
  1465  			COL_MAX_LEN)
  1466  		isAppend = false
  1467  	} else {
  1468  		isAppend = true
  1469  	}
  1470  	var retContent []map[string]interface{}
  1471  	if maxCount > 0 && len(sr.slowSqlStat) > maxCount {
  1472  		retContent = sr.slowSqlStat[0:maxCount]
  1473  		sr.slowSqlStat = sr.slowSqlStat[maxCount:len(sr.slowSqlStat)]
  1474  	} else {
  1475  		retContent = sr.slowSqlStat
  1476  		sr.slowSqlStat = nil
  1477  	}
  1478  	retList = append(retList, sr.getFormattedOutput(retContent, sqlRowField, sr.slowSqlStatColLens, isAppend))
  1479  	return sr.slowSqlStat != nil, retList
  1480  }
  1481  
  1482  func (sr *StatReader) getSlowSqlStat(topCount int, sqlId int, fields []string) []map[string]interface{} {
  1483  	var content []map[string]interface{}
  1484  
  1485  	if topCount != 0 {
  1486  		parameters := NewProperties()
  1487  		parameters.Set(PROP_NAME_SORT_FIELD, "MaxTimespan")
  1488  		parameters.Set(PROP_NAME_SORT_TYPE, "desc")
  1489  		parameters.Set(PROP_NAME_PAGE_NUM, "1")
  1490  		parameters.Set(PROP_NAME_PAGE_SIZE, strconv.Itoa(topCount))
  1491  
  1492  		content = sr.service(URL_SQL, parameters)
  1493  		if sqlId != -1 {
  1494  			matchedContent := make([]map[string]interface{}, 0)
  1495  			for _, sqlStat := range content {
  1496  				idStr := sqlStat["ID"]
  1497  				if idStr == sqlId {
  1498  					matchedContent = append(matchedContent, sqlStat)
  1499  					break
  1500  				}
  1501  			}
  1502  			content = matchedContent
  1503  		}
  1504  	}
  1505  
  1506  	if content == nil {
  1507  		content = make([]map[string]interface{}, 0)
  1508  	} else {
  1509  		i := 1
  1510  		for _, m := range content {
  1511  			m["rowNum"] = i
  1512  			i++
  1513  		}
  1514  	}
  1515  	content = addTitles(content, fields)
  1516  	return content
  1517  }
  1518  
  1519  func (sr *StatReader) getConnStat(connId string, fields []string) []map[string]interface{} {
  1520  	content := sr.service(URL_DATASOURCE, nil)
  1521  	if connId != "" {
  1522  		matchedContent := make([]map[string]interface{}, 0)
  1523  		for _, dsStat := range content {
   1524  			idStr := dsStat[idConstStr] // getData stores the identifier under "ID", not "Identity"
  1525  			if connId == idStr {
  1526  				matchedContent = append(matchedContent, dsStat)
  1527  				break
  1528  			}
  1529  		}
  1530  		content = matchedContent
  1531  	}
  1532  	if content == nil {
  1533  		content = make([]map[string]interface{}, 0)
  1534  	} else {
  1535  		i := 1
  1536  		for _, m := range content {
  1537  			m["rowNum"] = i
  1538  			i++
  1539  		}
  1540  	}
  1541  	content = addTitles(content, fields)
  1542  	return content
  1543  }
  1544  
  1545  func (sr *StatReader) getFormattedOutput(content []map[string]interface{}, fields []string, colLens []int,
  1546  	isAppend bool) string {
  1547  	return toTable(content, fields, colLens, true, isAppend)
  1548  }
  1549  
  1550  func (sr *StatReader) parseUrl(url string) *Properties {
  1551  	parameters := NewProperties()
  1552  
   1553  	if strings.TrimSpace(url) == "" {
  1554  		return parameters
  1555  	}
  1556  
  1557  	parametersStr := util.StringUtil.SubstringBetween(url, "?", "")
   1558  	if parametersStr == "" {
  1559  		return parameters
  1560  	}
  1561  
  1562  	parametersArray := strings.Split(parametersStr, "&")
  1563  
  1564  	for _, parameterStr := range parametersArray {
   1565  		index := strings.Index(parameterStr, "=") // index within this parameter, not the whole query string
  1566  		if index <= 0 {
  1567  			continue
  1568  		}
  1569  
  1570  		name := parameterStr[0:index]
  1571  		value := parameterStr[index+1:]
  1572  		parameters.Set(name, value)
  1573  	}
  1574  	return parameters
  1575  }
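
// exampleParseUrl is an illustrative sketch (not part of the original source)
// of the query-string form parseUrl expects: everything after '?' is split on
// '&' into name=value pairs and collected into a Properties set.
func exampleParseUrl(sr *StatReader) *Properties {
	// Expected to yield field=ExecuteCount and direction=desc.
	return sr.parseUrl("sql?field=ExecuteCount&direction=desc")
}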
  1576  
  1577  func (sr *StatReader) service(url string, params *Properties) []map[string]interface{} {
  1578  	if params != nil {
  1579  		params.SetProperties(sr.parseUrl(url))
  1580  	} else {
  1581  		params = sr.parseUrl(url)
  1582  	}
  1583  
   1584  	if strings.Index(url, URL_SQL_DETAIL) == 0 { // check "sqlDetail" before its prefix "sql"
   1585  		array := sr.getSqlStatDetailList(params)
   1586  		return array
   1587  	} else if strings.Index(url, URL_SQL) == 0 {
   1588  		array := sr.getSqlStatList(params)
   1589  		array = sr.comparatorOrderBy(array, params)
   1590  		params.Set(PROP_NAME_FLUSH_FREQ, strconv.Itoa(StatFlushFreq))
   1591  		return array
   1592  	} else if strings.Index(url, URL_DATASOURCE_DETAIL) == 0 { // likewise "dataSourceDetail" before "dataSource"
   1593  		array := sr.getConnStatDetailList(params)
   1594  		return array
   1595  	} else if strings.Index(url, URL_DATASOURCE) == 0 {
   1596  		array := sr.getConnStatList(params)
   1597  		array = sr.comparatorOrderBy(array, params)
   1598  		params.Set(PROP_NAME_FLUSH_FREQ, strconv.Itoa(StatFlushFreq))
   1599  		return array
   1600  	} else {
   1601  		return nil
   1602  	}
   1603  }
  1604  
  1605  func (sr *StatReader) getSqlStatList(params *Properties) []map[string]interface{} {
  1606  	array := make([]map[string]interface{}, 0)
  1607  	connStatMap := goStat.getConnStatMap()
  1608  	var sqlStatMap map[string]*sqlStat
  1609  	for _, connStat := range connStatMap {
  1610  		sqlStatMap = connStat.getSqlStatMap()
  1611  		for _, sqlStat := range sqlStatMap {
  1612  			data := sqlStat.getData()
   1613  			executeCount, _ := data[executeCountConstStr].(int64) // values are stored as int64; comparing the raw interface{} against an untyped 0 never matches
   1614  			runningCount, _ := data[runningCountConstStr].(int64)
   1615  			if executeCount == 0 && runningCount == 0 {
  1616  				continue
  1617  			}
  1618  
  1619  			array = append(array, data)
  1620  		}
  1621  	}
  1622  
  1623  	return array
  1624  }
  1625  
  1626  func (sr *StatReader) getSqlStatDetailList(params *Properties) []map[string]interface{} {
  1627  	array := make([]map[string]interface{}, 0)
  1628  	connStatMap := goStat.getConnStatMap()
  1629  	var data *sqlStat
  1630  	sqlId := ""
  1631  	dsId := ""
  1632  	if v := params.GetString(PROP_NAME_SQL_ID, ""); v != "" {
  1633  		sqlId = v
  1634  	}
  1635  	if v := params.GetString(PROP_NAME_DATASOURCE_ID, ""); v != "" {
  1636  		dsId = v
  1637  	}
  1638  	if sqlId != "" && dsId != "" {
  1639  		for _, connStat := range connStatMap {
  1640  			if dsId != connStat.id {
  1641  				continue
  1642  			} else {
  1643  				sqlStatMap := connStat.getSqlStatMap()
  1644  				for _, sqlStat := range sqlStatMap {
  1645  
  1646  					if sqlId == sqlStat.Id {
  1647  						data = sqlStat
  1648  						break
  1649  					}
  1650  				}
  1651  			}
  1652  			break
  1653  		}
  1654  	}
  1655  	if data != nil {
  1656  
  1657  		array = append(array, data.getData())
  1658  
  1659  	}
  1660  	return array
  1661  }
  1662  
  1663  func (sr *StatReader) getConnStatList(params *Properties) []map[string]interface{} {
  1664  	array := make([]map[string]interface{}, 0)
  1665  	connStatMap := goStat.getConnStatMap()
  1666  	id := ""
  1667  	if v := params.GetString(PROP_NAME_DATASOURCE_ID, ""); v != "" {
  1668  		id = v
  1669  	}
  1670  	for _, connStat := range connStatMap {
  1671  		data := connStat.getData()
  1672  
   1673  		connCount, _ := data["ConnCount"].(int64) // stored as int64; a raw interface{} == 0 check would never match
  1674  
  1675  		if connCount == 0 {
  1676  			continue
  1677  		}
  1678  
  1679  		if id != "" {
  1680  			if id == connStat.id {
  1681  				array = append(array, data)
  1682  				break
  1683  			} else {
  1684  				continue
  1685  			}
  1686  		} else {
  1687  
  1688  			array = append(array, data)
  1689  		}
  1690  
  1691  	}
  1692  	return array
  1693  }
  1694  
  1695  func (sr *StatReader) getConnStatDetailList(params *Properties) []map[string]interface{} {
  1696  	array := make([]map[string]interface{}, 0)
  1697  	var data *connectionStat
  1698  	connStatMap := goStat.getConnStatMap()
  1699  	id := ""
  1700  	if v := params.GetString(PROP_NAME_DATASOURCE_ID, ""); v != "" {
  1701  		id = v
  1702  	}
  1703  	if id != "" {
  1704  		for _, connStat := range connStatMap {
  1705  			if id == connStat.id {
  1706  				data = connStat
  1707  				break
  1708  			}
  1709  		}
  1710  	}
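        	// The "name" labels below are Chinese UI strings; roughly: 数据源 = data source,
        	// 总会话数 = total connection count, 活动会话数 = active connection count,
        	// 活动会话数峰值 = peak active connections, 总句柄数 = total statement handles,
        	// 活动句柄数 = active statement handles, 活动句柄数峰值 = peak active statement handles,
        	// 执行次数 = execute count, 执行出错次数 = execute error count,
        	// 提交次数 = commit count, 回滚次数 = rollback count.
        	// (Note that both 峰值 rows currently read dataValue.maxActiveStmtCount.)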
  1711  	if data != nil {
  1712  		dataValue := data.getValue(false)
  1713  		m := make(map[string]interface{}, 2)
  1714  		m["name"] = "数据源"
  1715  		m["value"] = dataValue.url
  1716  		array = append(array, m)
  1717  
  1718  		m = make(map[string]interface{}, 2)
  1719  		m["name"] = "总会话数"
  1720  		m["value"] = dataValue.connCount
  1721  		array = append(array, m)
  1722  
  1723  		m = make(map[string]interface{}, 2)
  1724  		m["name"] = "活动会话数"
  1725  		m["value"] = dataValue.activeConnCount
  1726  		array = append(array, m)
  1727  
  1728  		m = make(map[string]interface{}, 2)
  1729  		m["name"] = "活动会话数峰值"
  1730  		m["value"] = dataValue.maxActiveStmtCount
  1731  		array = append(array, m)
  1732  
  1733  		m = make(map[string]interface{}, 2)
  1734  		m["name"] = "总句柄数"
  1735  		m["value"] = dataValue.stmtCount
  1736  		array = append(array, m)
  1737  
  1738  		m = make(map[string]interface{}, 2)
  1739  		m["name"] = "活动句柄数"
  1740  		m["value"] = dataValue.activeStmtCount
  1741  		array = append(array, m)
  1742  
  1743  		m = make(map[string]interface{}, 2)
  1744  		m["name"] = "活动句柄数峰值"
  1745  		m["value"] = dataValue.maxActiveStmtCount
  1746  		array = append(array, m)
  1747  
  1748  		m = make(map[string]interface{}, 2)
  1749  		m["name"] = "执行次数"
  1750  		m["value"] = dataValue.executeCount
  1751  		array = append(array, m)
  1752  
  1753  		m = make(map[string]interface{}, 2)
  1754  		m["name"] = "执行出错次数"
  1755  		m["value"] = dataValue.errorCount
  1756  		array = append(array, m)
  1757  
  1758  		m = make(map[string]interface{}, 2)
  1759  		m["name"] = "提交次数"
  1760  		m["value"] = dataValue.commitCount
  1761  		array = append(array, m)
  1762  
  1763  		m = make(map[string]interface{}, 2)
  1764  		m["name"] = "回滚次数"
  1765  		m["value"] = dataValue.rollbackCount
  1766  		array = append(array, m)
  1767  
  1768  	}
  1769  	return array
  1770  }
  1771  
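        // mapSlice adapts a slice of stat rows to sort.Interface, ordering rows by the value
        // stored under orderByKey; isDesc flips the direction.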
  1772  type mapSlice struct {
  1773  	m          []map[string]interface{}
  1774  	isDesc     bool
  1775  	orderByKey string
  1776  }
  1777  
  1778  func newMapSlice(m []map[string]interface{}, isDesc bool, orderByKey string) *mapSlice {
  1779  	ms := new(mapSlice)
  1780  	ms.m = m
  1781  	ms.isDesc = isDesc
  1782  	ms.orderByKey = orderByKey
  1783  	return ms
  1784  }
  1785  
  1786  func (ms mapSlice) Len() int { return len(ms.m) }
  1787  
        // Less orders rows by the value stored under orderByKey; when isDesc is set the
        // comparison is reversed. The comma-ok assertions avoid a panic when the two values
        // have different dynamic types.
        func (ms mapSlice) Less(i, j int) bool {
        	v1 := ms.m[i][ms.orderByKey]
        	v2 := ms.m[j][ms.orderByKey]
        	if v1 == nil {
        		return true
        	} else if v2 == nil {
        		return false
        	}
        	less := true
        	switch t1 := v1.(type) {
        	case int64:
        		if t2, ok := v2.(int64); ok {
        			less = t1 < t2
        		}
        	case float64:
        		if t2, ok := v2.(float64); ok {
        			less = t1 < t2
        		}
        	}
        	if ms.isDesc {
        		return !less
        	}
        	return less
        }
  1808  
  1809  func (ms mapSlice) Swap(i, j int) {
  1810  	ms.m[i], ms.m[j] = ms.m[j], ms.m[i]
  1811  }
  1812  
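        // comparatorOrderBy sorts the rows by the sort field/direction requested in params,
        // applies pageNum/pageSize paging, and records the resulting paging info back into
        // params via resetPageInfo.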
  1813  func (sr *StatReader) comparatorOrderBy(array []map[string]interface{}, params *Properties) []map[string]interface{} {
  1814  	if array == nil {
  1815  		array = make([]map[string]interface{}, 0)
  1816  	}
  1817  
  1818  	orderBy := DEFAULT_ORDERBY
  1819  	orderType := DEFAULT_ORDER_TYPE
  1820  	pageNum := DEFAULT_PAGE_NUM
  1821  	pageSize := DEFAULT_PAGE_SIZE
  1822  	if params != nil {
  1823  		if v := params.GetTrimString(PROP_NAME_SORT_FIELD, ""); v != "" {
  1824  			orderBy = v
  1825  		}
  1826  
  1827  		if v := params.GetTrimString(PROP_NAME_SORT_TYPE, ""); v != "" {
  1828  			orderType = v
  1829  		}
  1830  
  1831  		if v := params.GetTrimString(PROP_NAME_PAGE_NUM, ""); v != "" {
  1832  			var err error
  1833  			pageNum, err = strconv.Atoi(v)
  1834  			if err != nil {
  1835  				pageNum = DEFAULT_PAGE_NUM
  1836  			}
  1837  		}
  1838  		if v := params.GetTrimString(PROP_NAME_PAGE_SIZE, ""); v != "" {
  1839  			var err error
  1840  			pageSize, err = strconv.Atoi(v)
  1841  			if err != nil {
  1842  				pageSize = DEFAULT_PAGE_SIZE
  1843  			}
  1844  		}
  1845  	}
  1846  
  1847  	rowCount := len(array)
  1848  	pageCount := int(math.Ceil(float64(rowCount) / float64(pageSize))) // float division so the page count rounds up
  1849  	if pageCount < 1 {
  1850  		pageCount = 1
  1851  	}
  1852  
  1853  	if pageNum > pageCount {
  1854  		pageNum = pageCount
  1855  	}
  1856  
  1857  	if len(array) > 0 {
  1858  
  1859  		if orderBy != "" {
  1860  			sort.Sort(newMapSlice(array, !(DEFAULT_ORDER_TYPE == orderType), orderBy))
  1861  		}
  1862  
  1863  		fromIndex := (pageNum - 1) * pageSize
  1864  
  1865  		toIndex := pageNum * pageSize
  1866  		if toIndex > rowCount {
  1867  			toIndex = rowCount
  1868  		}
  1869  		array = array[fromIndex:toIndex]
  1870  	}
  1871  	sr.resetPageInfo(params, rowCount, pageCount, pageNum)
  1872  	return array
  1873  }
  1874  
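        // resetPageInfo writes the computed page count, total row count and (possibly
        // clamped) page number back into params when a page size was requested.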
  1875  func (sr *StatReader) resetPageInfo(params *Properties, rowCount int, pageCount int, pageNum int) {
  1876  
  1877  	if params != nil {
  1878  		v := params.GetString(PROP_NAME_PAGE_SIZE, "")
  1879  		if v != "" {
  1880  
  1881  			params.Set(PROP_NAME_PAGE_COUNT, strconv.Itoa(pageCount))
  1882  			params.Set(PROP_NAME_TOTAL_ROW_COUNT, strconv.Itoa(rowCount))
  1883  			params.Set(PROP_NAME_PAGE_NUM, strconv.Itoa(pageNum))
  1884  		}
  1885  	}
  1886  }
  1887  
  1888  const COL_MAX_LEN = 32
  1889  
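        // calcColLens returns, for each field, the widest formatted value (in bytes) across
        // objList, capped at maxColLen when maxColLen is positive.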
  1890  func calcColLens(objList []map[string]interface{}, fields []string, maxColLen int) []int {
  1891  
  1892  	colLen := 0
  1893  	colVal := ""
  1894  	colLens := make([]int, len(fields))
  1895  	for _, obj := range objList {
  1896  		for i := 0; i < len(fields); i++ {
  1897  			colVal = getColValue(obj[fields[i]])
  1898  			colLen = len(colVal)
  1899  			if colLen > colLens[i] {
  1900  				colLens[i] = colLen
  1901  			}
  1902  		}
  1903  	}
  1904  	if maxColLen > 0 {
  1905  		for i := 0; i < len(fields); i++ {
  1906  			if colLens[i] > maxColLen {
  1907  				colLens[i] = maxColLen
  1908  			}
  1909  		}
  1910  	}
  1911  	return colLens
  1912  }
  1913  
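        // addTitles prepends a header row, whose cells repeat the field names, to objList
        // and returns the resulting slice.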
  1914  func addTitles(objList []map[string]interface{}, fields []string) []map[string]interface{} {
  1915  	titleMap := make(map[string]interface{}, len(fields))
  1916  	for i := 0; i < len(fields); i++ {
  1917  		titleMap[fields[i]] = fields[i]
  1918  	}
  1919  
  1920  	dst := append(objList, titleMap)
  1921  	copy(dst[1:], dst[:len(dst)-1])
  1922  	dst[0] = titleMap
  1923  	return dst
  1924  }
  1925  
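        // toTable renders objList as a plain-text table over the given fields. When colLens
        // is nil the widths are computed with calcColLens; showAll wraps over-long values
        // onto continuation rows instead of truncating them with "..."; appendMode skips the
        // leading separator line, presumably so the output can follow a previously emitted
        // table.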
  1926  func toTable(objList []map[string]interface{}, fields []string, colLens []int,
  1927  	showAll bool, appendMode bool) string {
  1928  	if fields == nil || objList == nil {
  1929  		return ""
  1930  	}
  1931  
  1932  	if colLens == nil {
  1933  		colLens = calcColLens(objList, fields, COL_MAX_LEN)
  1934  	}
  1935  
  1936  	output := &strings.Builder{}
  1937  	if !appendMode {
  1938  		sepLine(output, colLens)
  1939  	}
  1940  
  1941  	for _, obj := range objList {
  1942  		objMore := obj
  1943  		for objMore != nil {
  1944  			objMore = formateLine(output, objMore, fields, colLens, showAll)
  1945  		}
  1946  		sepLine(output, colLens)
  1947  	}
  1948  
  1949  	return output.String()
  1950  }
  1951  
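        // formateLine writes one table row to output. Cells longer than their column are
        // truncated with "..." unless showAll is set, in which case the overflow is returned
        // so the caller can emit continuation rows; it returns nil when nothing overflows.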
  1952  func formateLine(output *strings.Builder, obj map[string]interface{}, fields []string, colLens []int,
  1953  	showAll bool) map[string]interface{} {
  1954  	hasMore := false
  1955  	objMore := make(map[string]interface{})
  1956  	colLen := 0
  1957  	colVal := ""
  1958  	for i := 0; i < len(fields); i++ {
  1959  		colVal = getColValue(obj[fields[i]])
  1960  
  1961  		colLen = len(colVal)
  1962  		if colLen <= colLens[i] {
  1963  			output.WriteString("|")
  1964  			output.WriteString(colVal)
  1965  			blanks(output, colLens[i]-colLen)
  1966  			if showAll {
  1967  				objMore[fields[i]] = ""
  1968  			}
  1969  		} else {
  1970  			output.WriteString("|")
  1971  			if showAll {
  1972  				output.WriteString(colVal[0:colLens[i]])
  1973  				objMore[fields[i]] = colVal[colLens[i]:]
  1974  				hasMore = true
  1975  			} else {
  1976  				output.WriteString(colVal[0:colLens[i]-3] + "...")
  1977  			}
  1978  		}
  1979  	}
  1980  	output.WriteString("|")
  1981  	output.WriteString(util.StringUtil.LineSeparator())
  1982  
  1983  	if hasMore {
  1984  		return objMore
  1985  	} else {
  1986  		return nil
  1987  	}
  1988  }
  1989  
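        // sepLine writes a horizontal separator row; as written, every cell is filled with
        // '+' characters rather than the more conventional '-'.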
  1990  func sepLine(output *strings.Builder, colLens []int) {
  1991  	output.WriteString("+")
  1992  	for _, colLen := range colLens {
  1993  		for i := 0; i < colLen; i++ {
  1994  			output.WriteString("+")
  1995  		}
  1996  		output.WriteString("+")
  1997  	}
  1998  	output.WriteString(util.StringUtil.LineSeparator())
  1999  }
  2000  
  2001  func blanks(output *strings.Builder, count int) {
  2002  	for count > 0 {
  2003  		output.WriteString(" ")
  2004  		count--
  2005  	}
  2006  }
  2007  
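        // getColValue formats a cell value with fmt.Sprint and strips tabs and line breaks
        // so the value stays on a single table line; nil becomes the empty string.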
  2008  func getColValue(colObj interface{}) string {
  2009  	var colVal string
  2010  	if colObj == nil {
  2011  		colVal = ""
  2012  	} else {
  2013  		colVal = fmt.Sprint(colObj)
  2014  	}
  2015  
  2016  	colVal = strings.Replace(colVal, "\t", "", -1)
  2017  	colVal = strings.Replace(colVal, "\n", "", -1)
  2018  	colVal = strings.Replace(colVal, "\r", "", -1)
  2019  
  2020  	return colVal
  2021  }
  2022  
  2023  const (
  2024  	READ_MAX_SIZE = 100
  2025  )
  2026  
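        // statFlusher periodically renders the collected connection and SQL statistics as
        // text tables and appends them to a dated stat log file under StatDir. buffer
        // (a Dm_build_1499) is the driver's generated buffer type used to batch writes.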
  2027  type statFlusher struct {
  2028  	sr         *StatReader
  2029  	logList    []string
  2030  	date       string
  2031  	logFile    *os.File
  2032  	flushFreq  int
  2033  	filePath   string
  2034  	filePrefix string
  2035  	buffer     *Dm_build_1499
  2036  }
  2037  
  2038  func newStatFlusher() *statFlusher {
  2039  	sf := new(statFlusher)
  2040  	sf.sr = newStatReader()
  2041  	sf.logList = make([]string, 0, 32)
  2042  	sf.date = time.Now().Format("2006-01-02")
  2043  	sf.flushFreq = StatFlushFreq
  2044  	sf.filePath = StatDir
  2045  	sf.filePrefix = "dm_go_stat"
  2046  	sf.buffer = Dm_build_1503()
  2047  	return sf
  2048  }
  2049  
  2050  func (sf *statFlusher) isConnStatEnabled() bool {
  2051  	return StatEnable
  2052  }
  2053  
  2054  func (sf *statFlusher) isSlowSqlStatEnabled() bool {
  2055  	return StatEnable
  2056  }
  2057  
  2058  func (sf *statFlusher) isHighFreqSqlStatEnabled() bool {
  2059  	return StatEnable
  2060  }
  2061  
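        // doRun is the flush loop: every StatFlushFreq seconds it checks for collected
        // connection stats and, when present, writes connection, high-frequency SQL and
        // slow SQL stat tables in READ_MAX_SIZE chunks. It never returns, so it is
        // presumably started in its own goroutine elsewhere in the package, e.g.
        //
        //	go newStatFlusher().doRun()
        //
        // (illustrative only; the actual call site is not shown here).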
  2062  func (sf *statFlusher) doRun() {
  2063  
  2064  	for {
  2065  		if len(goStat.connStatMap) > 0 {
  2066  			sf.logList = append(sf.logList, time.Now().String())
  2067  			if sf.isConnStatEnabled() {
  2068  				sf.logList = append(sf.logList, "#connection stat")
  2069  				hasMore := true
  2070  				for hasMore {
  2071  					hasMore, sf.logList = sf.sr.readConnStat(sf.logList, READ_MAX_SIZE)
  2072  					sf.writeAndFlush(sf.logList, 0, len(sf.logList))
  2073  					sf.logList = sf.logList[0:0]
  2074  				}
  2075  			}
  2076  			if sf.isHighFreqSqlStatEnabled() {
  2077  				sf.logList = append(sf.logList, "#top "+strconv.Itoa(StatHighFreqSqlCount)+" high freq sql stat")
  2078  				hasMore := true
  2079  				for hasMore {
  2080  					hasMore, sf.logList = sf.sr.readHighFreqSqlStat(sf.logList, READ_MAX_SIZE)
  2081  					sf.writeAndFlush(sf.logList, 0, len(sf.logList))
  2082  					sf.logList = sf.logList[0:0]
  2083  				}
  2084  			}
  2085  			if sf.isSlowSqlStatEnabled() {
  2086  				sf.logList = append(sf.logList, "#top "+strconv.Itoa(StatSlowSqlCount)+" slow sql stat")
  2087  				hasMore := true
  2088  				for hasMore {
  2089  					hasMore, sf.logList = sf.sr.readSlowSqlStat(sf.logList, READ_MAX_SIZE)
  2090  					sf.writeAndFlush(sf.logList, 0, len(sf.logList))
  2091  					sf.logList = sf.logList[0:0]
  2092  				}
  2093  			}
  2094  			sf.logList = append(sf.logList, util.StringUtil.LineSeparator())
  2095  			sf.logList = append(sf.logList, util.StringUtil.LineSeparator())
  2096  			sf.writeAndFlush(sf.logList, 0, len(sf.logList))
  2097  			sf.logList = sf.logList[0:0]
  2099  		}
        		// sleep every pass so an empty connStatMap does not busy-spin this goroutine
        		time.Sleep(time.Duration(StatFlushFreq) * time.Second)
  2100  	}
  2101  }
  2102  
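        // writeAndFlush appends logs[startOff : startOff+l], one line each, to the internal
        // buffer, flushing to the stat file whenever FLUSH_SIZE is reached and once more for
        // any remainder.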
  2103  func (sf *statFlusher) writeAndFlush(logs []string, startOff int, l int) {
  2104  	var bytes []byte
  2105  	for i := startOff; i < startOff+l; i++ {
  2106  		bytes = []byte(logs[i] + util.StringUtil.LineSeparator())
  2107  
  2108  		sf.buffer.Dm_build_1525(bytes, 0, len(bytes))
  2109  
  2110  		if sf.buffer.Dm_build_1504() >= FLUSH_SIZE {
  2111  			sf.doFlush(sf.buffer)
  2112  		}
  2113  	}
  2114  
  2115  	if sf.buffer.Dm_build_1504() > 0 {
  2116  		sf.doFlush(sf.buffer)
  2117  	}
  2118  }
  2119  
  2120  func (sf *statFlusher) doFlush(buffer *Dm_build_1499) {
  2121  	if sf.needCreateNewFile() {
  2122  		sf.closeCurrentFile()
  2123  		sf.logFile = sf.createNewFile()
  2124  	}
  2125  	buffer.Dm_build_1519(sf.logFile, buffer.Dm_build_1504())
  2126  }
  2127  func (sf *statFlusher) closeCurrentFile() {
  2128  	if sf.logFile != nil {
  2129  		sf.logFile.Close()
  2130  		sf.logFile = nil
  2131  	}
  2132  }
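        // createNewFile opens a new dated stat file under StatDir, creating the directory if
        // needed, and returns nil when StatDir is empty or the file already exists. Note
        // that sf.filePath and fileName are concatenated directly, so StatDir is expected to
        // end with a path separator.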
  2133  func (sf *statFlusher) createNewFile() *os.File {
  2134  	sf.date = time.Now().Format("2006-01-02")
  2135  	fileName := sf.filePrefix + "_" + sf.date + "_" + strconv.Itoa(time.Now().Nanosecond()) + ".txt"
  2136  	sf.filePath = StatDir
  2137  	if len(sf.filePath) > 0 {
  2138  		if _, err := os.Stat(sf.filePath); err != nil {
  2139  			os.MkdirAll(sf.filePath, 0755)
  2140  		}
  2141  		if _, err := os.Stat(sf.filePath + fileName); err != nil {
  2142  			logFile, err := os.Create(sf.filePath + fileName)
  2143  			if err != nil {
  2144  				panic(err)
  2145  			}
  2146  			return logFile
  2147  		}
  2148  	}
  2149  	return nil
  2150  }
  2151  func (sf *statFlusher) needCreateNewFile() bool {
  2152  	now := time.Now().Format("2006-01-02")
        	// rotate when no file is open yet or the date has rolled over, before touching Stat
        	if sf.logFile == nil || now != sf.date {
        		return true
        	}
  2153  	fileInfo, err := sf.logFile.Stat()
  2154  	return err != nil || fileInfo.Size() > int64(MAX_FILE_SIZE)
  2155  }