gitee.com/curryzheng/dm@v0.0.1/h.go

     1  /*
     2   * Copyright (c) 2000-2018, 达梦数据库有限公司.
     3   * All rights reserved.
     4   */
     5  package dm
     6  
     7  import (
     8  	"fmt"
     9  	"gitee.com/curryzheng/dm/util"
    10  	"math"
    11  	"os"
    12  	"sort"
    13  	"strconv"
    14  	"strings"
    15  	"sync"
    16  	"sync/atomic"
    17  	"time"
    18  )
    19  
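// ExecuteTypeEnum identifies the kind of statement execution being recorded;
// ExecuteQuery is treated specially when the combined execute-and-result-hold
// histogram is updated.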
    20  type ExecuteTypeEnum int
    21  
    22  const (
    23  	Execute ExecuteTypeEnum = iota
    24  	ExecuteQuery
    25  	ExecuteUpdate
    26  )
    27  
    28  var idGenerator int64 = 0
    29  
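// generateId builds a process-unique identifier from the current timestamp
// plus an atomically incremented counter; it backs the "SQL..." and "DS..."
// ids assigned to sqlStat and connectionStat.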
    30  func generateId() string {
    31  	return time.Now().String() + strconv.Itoa(int(atomic.AddInt64(&idGenerator, 1)))
    32  }
    33  
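// getInt64 atomically reads a counter; when reset is true the counter is
// swapped to zero in the same operation, making snapshot-and-reset race free.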
    34  func getInt64(counter *int64, reset bool) int64 {
    35  	if reset {
    36  		return atomic.SwapInt64(counter, 0)
    37  	}
    38  	return atomic.LoadInt64(counter)
    39  }
    40  
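// SqlStatValue is an immutable snapshot of a sqlStat taken by sqlStat.getValue.
// The histogram_* buckets count executions whose elapsed time in milliseconds
// fell in the named range (histogram_1_10 covers [1ms, 10ms)); the
// fetchRowCount_* and updateCount_* buckets do the same for row counts.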
    41  type SqlStatValue struct {
    42  	id string
    43  
    44  	sql string
    45  
    46  	sqlHash int64
    47  
    48  	dataSource string
    49  
    50  	dataSourceId string
    51  
    52  	executeLastStartTime int64
    53  
    54  	executeBatchSizeTotal int64
    55  
    56  	executeBatchSizeMax int64
    57  
    58  	executeSuccessCount int64
    59  
    60  	executeSpanNanoTotal int64
    61  
    62  	executeSpanNanoMax int64
    63  
    64  	runningCount int64
    65  
    66  	concurrentMax int64
    67  
    68  	resultSetHoldTimeNano int64
    69  
    70  	executeAndResultSetHoldTime int64
    71  
    72  	executeNanoSpanMaxOccurTime int64
    73  
    74  	executeErrorCount int64
    75  
    76  	executeErrorLast error
    77  
    78  	executeErrorLastMessage string
    79  
    80  	executeErrorLastStackTrace string
    81  
    82  	executeErrorLastTime int64
    83  
    84  	updateCount int64
    85  
    86  	updateCountMax int64
    87  
    88  	fetchRowCount int64
    89  
    90  	fetchRowCountMax int64
    91  
    92  	inTransactionCount int64
    93  
    94  	lastSlowParameters string
    95  
    96  	clobOpenCount int64
    97  
    98  	blobOpenCount int64
    99  
   100  	readStringLength int64
   101  
   102  	readBytesLength int64
   103  
   104  	inputStreamOpenCount int64
   105  
   106  	readerOpenCount int64
   107  
   108  	histogram_0_1 int64
   109  
   110  	histogram_1_10 int64
   111  
   112  	histogram_10_100 int64
   113  
   114  	histogram_100_1000 int64
   115  
   116  	histogram_1000_10000 int64
   117  
   118  	histogram_10000_100000 int64
   119  
   120  	histogram_100000_1000000 int64
   121  
   122  	histogram_1000000_more int64
   123  
   124  	executeAndResultHoldTime_0_1 int64
   125  
   126  	executeAndResultHoldTime_1_10 int64
   127  
   128  	executeAndResultHoldTime_10_100 int64
   129  
   130  	executeAndResultHoldTime_100_1000 int64
   131  
   132  	executeAndResultHoldTime_1000_10000 int64
   133  
   134  	executeAndResultHoldTime_10000_100000 int64
   135  
   136  	executeAndResultHoldTime_100000_1000000 int64
   137  
   138  	executeAndResultHoldTime_1000000_more int64
   139  
   140  	fetchRowCount_0_1 int64
   141  
   142  	fetchRowCount_1_10 int64
   143  
   144  	fetchRowCount_10_100 int64
   145  
   146  	fetchRowCount_100_1000 int64
   147  
   148  	fetchRowCount_1000_10000 int64
   149  
   150  	fetchRowCount_10000_more int64
   151  
   152  	updateCount_0_1 int64
   153  
   154  	updateCount_1_10 int64
   155  
   156  	updateCount_10_100 int64
   157  
   158  	updateCount_100_1000 int64
   159  
   160  	updateCount_1000_10000 int64
   161  
   162  	updateCount_10000_more int64
   163  }
   164  
   165  func newSqlStatValue() *SqlStatValue {
   166  	ssv := new(SqlStatValue)
   167  	return ssv
   168  }
   169  
   170  func (ssv *SqlStatValue) getExecuteHistogram() []int64 {
   171  	return []int64{
   172  		ssv.histogram_0_1,
   173  		ssv.histogram_1_10,
   174  		ssv.histogram_10_100,
   175  		ssv.histogram_100_1000,
   176  		ssv.histogram_1000_10000,
   177  		ssv.histogram_10000_100000,
   178  		ssv.histogram_100000_1000000,
   179  		ssv.histogram_1000000_more,
   180  	}
   181  }
   182  
   183  func (ssv *SqlStatValue) getExecuteAndResultHoldHistogram() []int64 {
   184  	return []int64{ssv.executeAndResultHoldTime_0_1,
   185  		ssv.executeAndResultHoldTime_1_10,
   186  		ssv.executeAndResultHoldTime_10_100,
   187  		ssv.executeAndResultHoldTime_100_1000,
   188  		ssv.executeAndResultHoldTime_1000_10000,
   189  		ssv.executeAndResultHoldTime_10000_100000,
   190  		ssv.executeAndResultHoldTime_100000_1000000,
   191  		ssv.executeAndResultHoldTime_1000000_more,
   192  	}
   193  }
   194  
   195  func (ssv *SqlStatValue) getFetchRowHistogram() []int64 {
   196  	return []int64{ssv.fetchRowCount_0_1,
   197  		ssv.fetchRowCount_1_10,
   198  		ssv.fetchRowCount_10_100,
   199  		ssv.fetchRowCount_100_1000,
   200  		ssv.fetchRowCount_1000_10000,
   201  		ssv.fetchRowCount_10000_more,
   202  	}
   203  }
   204  
   205  func (ssv *SqlStatValue) getUpdateHistogram() []int64 {
   206  	return []int64{ssv.updateCount_0_1,
   207  		ssv.updateCount_1_10,
   208  		ssv.updateCount_10_100,
   209  		ssv.updateCount_100_1000,
   210  		ssv.updateCount_1000_10000,
   211  		ssv.updateCount_10000_more,
   212  	}
   213  }
   214  
   215  func (ssv *SqlStatValue) getExecuteCount() int64 {
   216  	return ssv.executeErrorCount + ssv.executeSuccessCount
   217  }
   218  
   219  func (ssv *SqlStatValue) getExecuteMillisMax() int64 {
   220  	return ssv.executeSpanNanoMax / (1000 * 1000)
   221  }
   222  
   223  func (ssv *SqlStatValue) getExecuteMillisTotal() int64 {
   224  	return ssv.executeSpanNanoTotal / (1000 * 1000)
   225  }
   226  
   227  func (ssv *SqlStatValue) getHistogramValues() []int64 {
   228  	return []int64{
   229  
   230  		ssv.histogram_0_1,
   231  		ssv.histogram_1_10,
   232  		ssv.histogram_10_100,
   233  		ssv.histogram_100_1000,
   234  		ssv.histogram_1000_10000,
   235  		ssv.histogram_10000_100000,
   236  		ssv.histogram_100000_1000000,
   237  		ssv.histogram_1000000_more,
   238  	}
   239  }
   240  
   241  func (ssv *SqlStatValue) getFetchRowCountHistogramValues() []int64 {
   242  	return []int64{
   243  
   244  		ssv.fetchRowCount_0_1,
   245  		ssv.fetchRowCount_1_10,
   246  		ssv.fetchRowCount_10_100,
   247  		ssv.fetchRowCount_100_1000,
   248  		ssv.fetchRowCount_1000_10000,
   249  		ssv.fetchRowCount_10000_more,
   250  	}
   251  }
   252  
   253  func (ssv *SqlStatValue) getUpdateCountHistogramValues() []int64 {
   254  	return []int64{
   255  
   256  		ssv.updateCount_0_1,
   257  		ssv.updateCount_1_10,
   258  		ssv.updateCount_10_100,
   259  		ssv.updateCount_100_1000,
   260  		ssv.updateCount_1000_10000,
   261  		ssv.updateCount_10000_more,
   262  	}
   263  }
   264  
   265  func (ssv *SqlStatValue) getExecuteAndResultHoldTimeHistogramValues() []int64 {
   266  	return []int64{
   267  
   268  		ssv.executeAndResultHoldTime_0_1,
   269  		ssv.executeAndResultHoldTime_1_10,
   270  		ssv.executeAndResultHoldTime_10_100,
   271  		ssv.executeAndResultHoldTime_100_1000,
   272  		ssv.executeAndResultHoldTime_1000_10000,
   273  		ssv.executeAndResultHoldTime_10000_100000,
   274  		ssv.executeAndResultHoldTime_100000_1000000,
   275  		ssv.executeAndResultHoldTime_1000000_more,
   276  	}
   277  }
   278  
   279  func (ssv *SqlStatValue) getResultSetHoldTimeMilis() int64 {
   280  	return ssv.resultSetHoldTimeNano / (1000 * 1000)
   281  }
   282  
   283  func (ssv *SqlStatValue) getExecuteAndResultSetHoldTimeMilis() int64 {
   284  	return ssv.executeAndResultSetHoldTime / (1000 * 1000)
   285  }
   286  
   287  func (ssv *SqlStatValue) getData() map[string]interface{} {
   288  	m := make(map[string]interface{})
   289  
   290  	m[idConstStr] = ssv.id
   291  	m[dataSourceConstStr] = ssv.dataSource
   292  	m["DataSourceId"] = ssv.dataSourceId
   293  	m[sqlConstStr] = ssv.sql
   294  	m[executeCountConstStr] = ssv.getExecuteCount()
   295  	m[errorCountConstStr] = ssv.executeErrorCount
   296  
   297  	m[totalTimeConstStr] = ssv.getExecuteMillisTotal()
   298  	m["LastTime"] = ssv.executeLastStartTime
   299  	m[maxTimespanConstStr] = ssv.getExecuteMillisMax()
   300  	m["LastError"] = ssv.executeErrorLast
   301  	m[effectedRowCountConstStr] = ssv.updateCount
   302  
   303  	m[fetchRowCountConstStr] = ssv.fetchRowCount
   304  	m["MaxTimespanOccurTime"] = ssv.executeNanoSpanMaxOccurTime
   305  	m["BatchSizeMax"] = ssv.executeBatchSizeMax
   306  	m["BatchSizeTotal"] = ssv.executeBatchSizeTotal
   307  	m[concurrentMaxConstStr] = ssv.concurrentMax
   308  
   309  	m[runningCountConstStr] = ssv.runningCount
   310  
   311  	if ssv.executeErrorLastMessage != "" {
   312  		m["LastErrorMessage"] = ssv.executeErrorLastMessage
   313  		m["LastErrorStackTrace"] = ssv.executeErrorLastStackTrace
   314  		m["LastErrorTime"] = ssv.executeErrorLastTime
   315  	} else {
   316  		m["LastErrorMessage"] = ""
   317  		m["LastErrorClass"] = ""
   318  		m["LastErrorStackTrace"] = ""
   319  		m["LastErrorTime"] = ""
   320  	}
   321  
   322  	m[urlConstStr] = ""
   323  	m[inTransactionCountConstStr] = ssv.inTransactionCount
   324  
   325  	m["Histogram"] = ssv.getHistogramValues()
   326  	m["LastSlowParameters"] = ssv.lastSlowParameters
   327  	m["ResultSetHoldTime"] = ssv.getResultSetHoldTimeMilis()
   328  	m["ExecuteAndResultSetHoldTime"] = ssv.getExecuteAndResultSetHoldTimeMilis()
	// Store the histogram under its own key; FetchRowCount itself was already set above.
	m[fetchRowCountHistogramConstStr] = ssv.getFetchRowCountHistogramValues()
   330  
   331  	m[effectedRowCountHistogramConstStr] = ssv.getUpdateCountHistogramValues()
   332  	m[executeAndResultHoldTimeHistogramConstStr] = ssv.getExecuteAndResultHoldTimeHistogramValues()
   333  	m["EffectedRowCountMax"] = ssv.updateCountMax
   334  	m["FetchRowCountMax"] = ssv.fetchRowCountMax
   335  	m[clobOpenCountConstStr] = ssv.clobOpenCount
   336  
   337  	m[blobOpenCountConstStr] = ssv.blobOpenCount
   338  	m["ReadStringLength"] = ssv.readStringLength
   339  	m["ReadBytesLength"] = ssv.readBytesLength
   340  	m["InputStreamOpenCount"] = ssv.inputStreamOpenCount
   341  	m["ReaderOpenCount"] = ssv.readerOpenCount
   342  
   343  	m["HASH"] = ssv.sqlHash
   344  
   345  	m[executeHoldTimeHistogramConstStr] = ssv.getExecuteHistogram()
   346  
   347  	return m
   348  }
   349  
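// sqlStat accumulates execution statistics for a single SQL text. The counter
// fields are updated with sync/atomic so one sqlStat can be shared by
// concurrent statements; getValue copies (and optionally resets) them into a
// SqlStatValue snapshot.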
   350  type sqlStat struct {
   351  	Sql string
   352  
   353  	SqlHash int64
   354  
   355  	Id string
   356  
   357  	ExecuteLastStartTime int64
   358  
   359  	ExecuteBatchSizeTotal int64
   360  
   361  	ExecuteBatchSizeMax int64
   362  
   363  	ExecuteSuccessCount int64
   364  
   365  	ExecuteSpanNanoTotal int64
   366  
   367  	ExecuteSpanNanoMax int64
   368  
   369  	RunningCount int64
   370  
   371  	ConcurrentMax int64
   372  
   373  	ResultSetHoldTimeNano int64
   374  
   375  	ExecuteAndResultSetHoldTime int64
   376  
   377  	DataSource string
   378  
   379  	File string
   380  
   381  	ExecuteNanoSpanMaxOccurTime int64
   382  
   383  	ExecuteErrorCount int64
   384  
   385  	ExecuteErrorLast error
   386  
   387  	ExecuteErrorLastTime int64
   388  
   389  	UpdateCount int64
   390  
   391  	UpdateCountMax int64
   392  
   393  	FetchRowCount int64
   394  
   395  	FetchRowCountMax int64
   396  
   397  	InTransactionCount int64
   398  
   399  	LastSlowParameters string
   400  
   401  	Removed int64
   402  
   403  	ClobOpenCount int64
   404  
   405  	BlobOpenCount int64
   406  
   407  	ReadStringLength int64
   408  
   409  	ReadBytesLength int64
   410  
   411  	InputStreamOpenCount int64
   412  
   413  	ReaderOpenCount int64
   414  
   415  	Histogram_0_1 int64
   416  
   417  	Histogram_1_10 int64
   418  
   419  	Histogram_10_100 int64
   420  
   421  	Histogram_100_1000 int64
   422  
   423  	Histogram_1000_10000 int64
   424  
   425  	Histogram_10000_100000 int64
   426  
   427  	Histogram_100000_1000000 int64
   428  
   429  	Histogram_1000000_more int64
   430  
   431  	ExecuteAndResultHoldTime_0_1 int64
   432  
   433  	ExecuteAndResultHoldTime_1_10 int64
   434  
   435  	ExecuteAndResultHoldTime_10_100 int64
   436  
   437  	ExecuteAndResultHoldTime_100_1000 int64
   438  
   439  	ExecuteAndResultHoldTime_1000_10000 int64
   440  
   441  	ExecuteAndResultHoldTime_10000_100000 int64
   442  
   443  	ExecuteAndResultHoldTime_100000_1000000 int64
   444  
   445  	ExecuteAndResultHoldTime_1000000_more int64
   446  
   447  	FetchRowCount_0_1 int64
   448  
   449  	FetchRowCount_1_10 int64
   450  
   451  	FetchRowCount_10_100 int64
   452  
   453  	FetchRowCount_100_1000 int64
   454  
   455  	FetchRowCount_1000_10000 int64
   456  
   457  	FetchRowCount_10000_more int64
   458  
   459  	UpdateCount_0_1 int64
   460  
   461  	UpdateCount_1_10 int64
   462  
   463  	UpdateCount_10_100 int64
   464  
   465  	UpdateCount_100_1000 int64
   466  
   467  	UpdateCount_1000_10000 int64
   468  
   469  	UpdateCount_10000_more int64
   470  
   471  	DataSourceId string
   472  }
   473  
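// NewSqlStat creates a sqlStat for the given SQL text and assigns it an id
// prefixed with "SQL". Within the driver it is normally reached through
// connectionStat.createSqlStat, which also fills DataSource/DataSourceId.
// Illustrative use, with hypothetical values:
//
//	stat := NewSqlStat("SELECT 1")
//	stat.incrementRunningCount()
//	stat.addExecuteTime(1500000, "") // a 1.5ms execution, no slow-SQL parameters
//	stat.incrementExecuteSuccessCount()
//	stat.decrementRunningCount()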
   474  func NewSqlStat(sql string) *sqlStat {
   475  	s := new(sqlStat)
   476  	s.Sql = sql
   477  	s.Id = "SQL" + generateId()
   478  	return s
   479  }
   480  
   481  func (s *sqlStat) reset() {
   482  	s.ExecuteLastStartTime = 0
   483  
   484  	s.ExecuteBatchSizeTotal = 0
   485  	s.ExecuteBatchSizeMax = 0
   486  
   487  	s.ExecuteSuccessCount = 0
   488  	s.ExecuteSpanNanoTotal = 0
   489  	s.ExecuteSpanNanoMax = 0
   490  	s.ExecuteNanoSpanMaxOccurTime = 0
   491  	s.ConcurrentMax = 0
   492  
   493  	s.ExecuteErrorCount = 0
   494  	s.ExecuteErrorLast = nil
   495  	s.ExecuteErrorLastTime = 0
   496  
   497  	s.UpdateCount = 0
   498  	s.UpdateCountMax = 0
   499  	s.FetchRowCount = 0
   500  	s.FetchRowCountMax = 0
   501  
   502  	s.Histogram_0_1 = 0
   503  	s.Histogram_1_10 = 0
   504  	s.Histogram_10_100 = 0
   505  	s.Histogram_100_1000 = 0
   506  	s.Histogram_1000_10000 = 0
   507  	s.Histogram_10000_100000 = 0
   508  	s.Histogram_100000_1000000 = 0
   509  	s.Histogram_1000000_more = 0
   510  
   511  	s.LastSlowParameters = ""
   512  	s.InTransactionCount = 0
   513  	s.ResultSetHoldTimeNano = 0
   514  	s.ExecuteAndResultSetHoldTime = 0
   515  
   516  	s.FetchRowCount_0_1 = 0
   517  	s.FetchRowCount_1_10 = 0
   518  	s.FetchRowCount_10_100 = 0
   519  	s.FetchRowCount_100_1000 = 0
   520  	s.FetchRowCount_1000_10000 = 0
   521  	s.FetchRowCount_10000_more = 0
   522  
   523  	s.UpdateCount_0_1 = 0
   524  	s.UpdateCount_1_10 = 0
   525  	s.UpdateCount_10_100 = 0
   526  	s.UpdateCount_100_1000 = 0
   527  	s.UpdateCount_1000_10000 = 0
   528  	s.UpdateCount_10000_more = 0
   529  
   530  	s.ExecuteAndResultHoldTime_0_1 = 0
   531  	s.ExecuteAndResultHoldTime_1_10 = 0
   532  	s.ExecuteAndResultHoldTime_10_100 = 0
   533  	s.ExecuteAndResultHoldTime_100_1000 = 0
   534  	s.ExecuteAndResultHoldTime_1000_10000 = 0
   535  	s.ExecuteAndResultHoldTime_10000_100000 = 0
   536  	s.ExecuteAndResultHoldTime_100000_1000000 = 0
   537  	s.ExecuteAndResultHoldTime_1000000_more = 0
   538  
   539  	s.BlobOpenCount = 0
   540  	s.ClobOpenCount = 0
   541  	s.ReadStringLength = 0
   542  	s.ReadBytesLength = 0
   543  	s.InputStreamOpenCount = 0
   544  	s.ReaderOpenCount = 0
   545  }
   546  
   547  func (s *sqlStat) getValueAndReset() *SqlStatValue {
   548  	return s.getValue(true)
   549  }
   550  
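// getValue copies the current counters into a SqlStatValue snapshot. With
// reset=true the atomic counters are swapped to zero as they are read, so the
// snapshot covers exactly the interval since the previous reset.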
   551  func (s *sqlStat) getValue(reset bool) *SqlStatValue {
   552  	ssv := newSqlStatValue()
   553  	ssv.dataSource = s.DataSource
   554  	ssv.dataSourceId = s.DataSourceId
   555  	ssv.sql = s.Sql
   556  	ssv.sqlHash = s.SqlHash
   557  	ssv.id = s.Id
   558  	ssv.executeLastStartTime = s.ExecuteLastStartTime
   559  	if reset {
   560  		s.ExecuteLastStartTime = 0
   561  	}
   562  
   563  	ssv.executeBatchSizeTotal = getInt64(&s.ExecuteBatchSizeTotal, reset)
   564  	ssv.executeBatchSizeMax = getInt64(&s.ExecuteBatchSizeMax, reset)
   565  	ssv.executeSuccessCount = getInt64(&s.ExecuteSuccessCount, reset)
   566  	ssv.executeSpanNanoTotal = getInt64(&s.ExecuteSpanNanoTotal, reset)
   567  	ssv.executeSpanNanoMax = getInt64(&s.ExecuteSpanNanoMax, reset)
   568  	ssv.executeNanoSpanMaxOccurTime = s.ExecuteNanoSpanMaxOccurTime
   569  	if reset {
   570  		s.ExecuteNanoSpanMaxOccurTime = 0
   571  	}
   572  
   573  	ssv.runningCount = s.RunningCount
   574  	ssv.concurrentMax = getInt64(&s.ConcurrentMax, reset)
   575  	ssv.executeErrorCount = getInt64(&s.ExecuteErrorCount, reset)
   576  	ssv.executeErrorLast = s.ExecuteErrorLast
   577  	if reset {
   578  		s.ExecuteErrorLast = nil
   579  	}
   580  
	ssv.executeErrorLastTime = s.ExecuteErrorLastTime
	if reset {
		// Reset the source counter, not the snapshot that was just taken.
		s.ExecuteErrorLastTime = 0
	}
   585  
   586  	ssv.updateCount = getInt64(&s.UpdateCount, reset)
   587  	ssv.updateCountMax = getInt64(&s.UpdateCountMax, reset)
   588  	ssv.fetchRowCount = getInt64(&s.FetchRowCount, reset)
   589  	ssv.fetchRowCountMax = getInt64(&s.FetchRowCountMax, reset)
   590  	ssv.histogram_0_1 = getInt64(&s.Histogram_0_1, reset)
   591  	ssv.histogram_1_10 = getInt64(&s.Histogram_1_10, reset)
   592  	ssv.histogram_10_100 = getInt64(&s.Histogram_10_100, reset)
   593  	ssv.histogram_100_1000 = getInt64(&s.Histogram_100_1000, reset)
   594  	ssv.histogram_1000_10000 = getInt64(&s.Histogram_1000_10000, reset)
   595  	ssv.histogram_10000_100000 = getInt64(&s.Histogram_10000_100000, reset)
   596  	ssv.histogram_100000_1000000 = getInt64(&s.Histogram_100000_1000000, reset)
   597  	ssv.histogram_1000000_more = getInt64(&s.Histogram_1000000_more, reset)
   598  	ssv.lastSlowParameters = s.LastSlowParameters
   599  	if reset {
   600  		s.LastSlowParameters = ""
   601  	}
   602  
   603  	ssv.inTransactionCount = getInt64(&s.InTransactionCount, reset)
   604  	ssv.resultSetHoldTimeNano = getInt64(&s.ResultSetHoldTimeNano, reset)
   605  	ssv.executeAndResultSetHoldTime = getInt64(&s.ExecuteAndResultSetHoldTime, reset)
   606  	ssv.fetchRowCount_0_1 = getInt64(&s.FetchRowCount_0_1, reset)
   607  	ssv.fetchRowCount_1_10 = getInt64(&s.FetchRowCount_1_10, reset)
   608  	ssv.fetchRowCount_10_100 = getInt64(&s.FetchRowCount_10_100, reset)
   609  	ssv.fetchRowCount_100_1000 = getInt64(&s.FetchRowCount_100_1000, reset)
   610  	ssv.fetchRowCount_1000_10000 = getInt64(&s.FetchRowCount_1000_10000, reset)
   611  	ssv.fetchRowCount_10000_more = getInt64(&s.FetchRowCount_10000_more, reset)
   612  	ssv.updateCount_0_1 = getInt64(&s.UpdateCount_0_1, reset)
   613  	ssv.updateCount_1_10 = getInt64(&s.UpdateCount_1_10, reset)
   614  	ssv.updateCount_10_100 = getInt64(&s.UpdateCount_10_100, reset)
   615  	ssv.updateCount_100_1000 = getInt64(&s.UpdateCount_100_1000, reset)
   616  	ssv.updateCount_1000_10000 = getInt64(&s.UpdateCount_1000_10000, reset)
   617  	ssv.updateCount_10000_more = getInt64(&s.UpdateCount_10000_more, reset)
   618  	ssv.executeAndResultHoldTime_0_1 = getInt64(&s.ExecuteAndResultHoldTime_0_1, reset)
   619  	ssv.executeAndResultHoldTime_1_10 = getInt64(&s.ExecuteAndResultHoldTime_1_10, reset)
   620  	ssv.executeAndResultHoldTime_10_100 = getInt64(&s.ExecuteAndResultHoldTime_10_100, reset)
   621  	ssv.executeAndResultHoldTime_100_1000 = getInt64(&s.ExecuteAndResultHoldTime_100_1000, reset)
   622  	ssv.executeAndResultHoldTime_1000_10000 = getInt64(&s.ExecuteAndResultHoldTime_1000_10000, reset)
   623  	ssv.executeAndResultHoldTime_10000_100000 = getInt64(&s.ExecuteAndResultHoldTime_10000_100000, reset)
   624  	ssv.executeAndResultHoldTime_100000_1000000 = getInt64(&s.ExecuteAndResultHoldTime_100000_1000000, reset)
   625  	ssv.executeAndResultHoldTime_1000000_more = getInt64(&s.ExecuteAndResultHoldTime_1000000_more, reset)
   626  	ssv.blobOpenCount = getInt64(&s.BlobOpenCount, reset)
   627  	ssv.clobOpenCount = getInt64(&s.ClobOpenCount, reset)
   628  	ssv.readStringLength = getInt64(&s.ReadStringLength, reset)
   629  	ssv.readBytesLength = getInt64(&s.ReadBytesLength, reset)
   630  	ssv.inputStreamOpenCount = getInt64(&s.InputStreamOpenCount, reset)
   631  	ssv.readerOpenCount = getInt64(&s.ReaderOpenCount, reset)
   632  	return ssv
   633  }
   634  
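// addUpdateCount records the affected-row count of an execution: it adds the
// delta to the running total, raises UpdateCountMax through a CAS loop, and
// increments the matching updateCount_* bucket.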
   635  func (s *sqlStat) addUpdateCount(delta int64) {
   636  	if delta > 0 {
   637  		atomic.AddInt64(&s.UpdateCount, delta)
   638  	}
   639  
   640  	for {
   641  		max := atomic.LoadInt64(&s.UpdateCountMax)
   642  		if delta <= max {
   643  			break
   644  		}
   645  		if atomic.CompareAndSwapInt64(&s.UpdateCountMax, max, delta) {
   646  			break
   647  		}
   648  	}
   649  
   650  	if delta < 1 {
   651  		atomic.AddInt64(&s.UpdateCount_0_1, 1)
   652  	} else if delta < 10 {
   653  		atomic.AddInt64(&s.UpdateCount_1_10, 1)
   654  	} else if delta < 100 {
   655  		atomic.AddInt64(&s.UpdateCount_10_100, 1)
   656  	} else if delta < 1000 {
   657  		atomic.AddInt64(&s.UpdateCount_100_1000, 1)
   658  	} else if delta < 10000 {
   659  		atomic.AddInt64(&s.UpdateCount_1000_10000, 1)
   660  	} else {
   661  		atomic.AddInt64(&s.UpdateCount_10000_more, 1)
   662  	}
   663  }
   664  
   665  func (s *sqlStat) incrementClobOpenCount() {
   666  	atomic.AddInt64(&s.ClobOpenCount, 1)
   667  }
   668  
   669  func (s *sqlStat) incrementBlobOpenCount() {
   670  	atomic.AddInt64(&s.BlobOpenCount, 1)
   671  }
   672  
   673  func (s *sqlStat) addStringReadLength(length int64) {
   674  	atomic.AddInt64(&s.ReadStringLength, length)
   675  }
   676  
   677  func (s *sqlStat) addReadBytesLength(length int64) {
   678  	atomic.AddInt64(&s.ReadBytesLength, length)
   679  }
   680  
   681  func (s *sqlStat) addReaderOpenCount(count int64) {
   682  	atomic.AddInt64(&s.ReaderOpenCount, count)
   683  }
   684  
   685  func (s *sqlStat) addInputStreamOpenCount(count int64) {
   686  	atomic.AddInt64(&s.InputStreamOpenCount, count)
   687  }
   688  
   689  func (s *sqlStat) addFetchRowCount(delta int64) {
   690  	atomic.AddInt64(&s.FetchRowCount, delta)
   691  	for {
   692  		max := atomic.LoadInt64(&s.FetchRowCountMax)
   693  		if delta <= max {
   694  			break
   695  		}
   696  		if atomic.CompareAndSwapInt64(&s.FetchRowCountMax, max, delta) {
   697  			break
   698  		}
   699  	}
   700  
   701  	if delta < 1 {
   702  		atomic.AddInt64(&s.FetchRowCount_0_1, 1)
   703  	} else if delta < 10 {
   704  		atomic.AddInt64(&s.FetchRowCount_1_10, 1)
   705  	} else if delta < 100 {
   706  		atomic.AddInt64(&s.FetchRowCount_10_100, 1)
   707  	} else if delta < 1000 {
   708  		atomic.AddInt64(&s.FetchRowCount_100_1000, 1)
   709  	} else if delta < 10000 {
   710  		atomic.AddInt64(&s.FetchRowCount_1000_10000, 1)
   711  	} else {
   712  		atomic.AddInt64(&s.FetchRowCount_10000_more, 1)
   713  	}
   714  
   715  }
   716  
   717  func (s *sqlStat) addExecuteBatchCount(batchSize int64) {
   718  	atomic.AddInt64(&s.ExecuteBatchSizeTotal, batchSize)
   719  
   720  	for {
   721  		current := atomic.LoadInt64(&s.ExecuteBatchSizeMax)
   722  		if current < batchSize {
   723  			if atomic.CompareAndSwapInt64(&s.ExecuteBatchSizeMax, current, batchSize) {
   724  				break
   725  			} else {
   726  				continue
   727  			}
   728  		} else {
   729  			break
   730  		}
   731  	}
   732  }
   733  
   734  func (s *sqlStat) incrementExecuteSuccessCount() {
   735  	atomic.AddInt64(&s.ExecuteSuccessCount, 1)
   736  }
   737  
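// incrementRunningCount bumps the number of in-flight executions and, through
// a CAS loop, raises ConcurrentMax when a new concurrency peak is observed.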
   738  func (s *sqlStat) incrementRunningCount() {
   739  	val := atomic.AddInt64(&s.RunningCount, 1)
   740  
   741  	for {
   742  		max := atomic.LoadInt64(&s.ConcurrentMax)
   743  		if val > max {
   744  			if atomic.CompareAndSwapInt64(&s.ConcurrentMax, max, val) {
   745  				break
   746  			} else {
   747  				continue
   748  			}
   749  		} else {
   750  			break
   751  		}
   752  	}
   753  }
   754  
   755  func (s *sqlStat) decrementRunningCount() {
   756  	atomic.AddInt64(&s.RunningCount, -1)
   757  }
   758  
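// addExecuteTimeAndResultHoldTimeHistogramRecord records the execution time
// and, for non-query executions that produced no result set, also counts the
// span in the combined execute-and-result-hold histogram.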
   759  func (s *sqlStat) addExecuteTimeAndResultHoldTimeHistogramRecord(executeType ExecuteTypeEnum, firstResultSet bool, nanoSpan int64, parameters string) {
   760  	s.addExecuteTime(nanoSpan, parameters)
   761  
   762  	if ExecuteQuery != executeType && !firstResultSet {
   763  		s.executeAndResultHoldTimeHistogramRecord(nanoSpan)
   764  	}
   765  }
   766  
   767  func (s *sqlStat) executeAndResultHoldTimeHistogramRecord(nanoSpan int64) {
   768  	millis := nanoSpan / 1000 / 1000
   769  
   770  	if millis < 1 {
   771  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_0_1, 1)
   772  	} else if millis < 10 {
   773  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_1_10, 1)
   774  	} else if millis < 100 {
   775  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_10_100, 1)
   776  	} else if millis < 1000 {
   777  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_100_1000, 1)
   778  	} else if millis < 10000 {
   779  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_1000_10000, 1)
   780  	} else if millis < 100000 {
   781  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_10000_100000, 1)
   782  	} else if millis < 1000000 {
   783  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_100000_1000000, 1)
   784  	} else {
   785  		atomic.AddInt64(&s.ExecuteAndResultHoldTime_1000000_more, 1)
   786  	}
   787  }
   788  
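// histogramRecord converts an execution span from nanoseconds to milliseconds
// and increments the corresponding Histogram_* bucket.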
   789  func (s *sqlStat) histogramRecord(nanoSpan int64) {
   790  	millis := nanoSpan / 1000 / 1000
   791  
   792  	if millis < 1 {
   793  		atomic.AddInt64(&s.Histogram_0_1, 1)
   794  	} else if millis < 10 {
   795  		atomic.AddInt64(&s.Histogram_1_10, 1)
   796  	} else if millis < 100 {
   797  		atomic.AddInt64(&s.Histogram_10_100, 1)
   798  	} else if millis < 1000 {
   799  		atomic.AddInt64(&s.Histogram_100_1000, 1)
   800  	} else if millis < 10000 {
   801  		atomic.AddInt64(&s.Histogram_1000_10000, 1)
   802  	} else if millis < 100000 {
   803  		atomic.AddInt64(&s.Histogram_10000_100000, 1)
   804  	} else if millis < 1000000 {
   805  		atomic.AddInt64(&s.Histogram_100000_1000000, 1)
   806  	} else {
   807  		atomic.AddInt64(&s.Histogram_1000000_more, 1)
   808  	}
   809  }
   810  
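// addExecuteTime adds an execution span to the nanosecond total, updates the
// maximum span (together with its occurrence time and the slow-SQL
// parameters) through a CAS loop, and records the span in the histogram.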
   811  func (s *sqlStat) addExecuteTime(nanoSpan int64, parameters string) {
   812  	atomic.AddInt64(&s.ExecuteSpanNanoTotal, nanoSpan)
   813  
   814  	for {
   815  		current := atomic.LoadInt64(&s.ExecuteSpanNanoMax)
   816  		if current < nanoSpan {
   817  			if atomic.CompareAndSwapInt64(&s.ExecuteSpanNanoMax, current, nanoSpan) {
   818  
   819  				s.ExecuteNanoSpanMaxOccurTime = time.Now().UnixNano()
   820  				s.LastSlowParameters = parameters
   821  
   822  				break
   823  			} else {
   824  				continue
   825  			}
   826  		} else {
   827  			break
   828  		}
   829  	}
   830  
   831  	s.histogramRecord(nanoSpan)
   832  }
   833  
   834  func (s *sqlStat) getExecuteMillisTotal() int64 {
   835  	return s.ExecuteSpanNanoTotal / (1000 * 1000)
   836  }
   837  
   838  func (s *sqlStat) getExecuteMillisMax() int64 {
   839  	return s.ExecuteSpanNanoMax / (1000 * 1000)
   840  }
   841  
   842  func (s *sqlStat) incrementInTransactionCount() {
   843  	atomic.AddInt64(&s.InTransactionCount, 1)
   844  }
   845  
   846  func (s *sqlStat) getExecuteCount() int64 {
   847  	return s.ExecuteErrorCount + s.ExecuteSuccessCount
   848  }
   849  
   850  func (s *sqlStat) getData() map[string]interface{} {
   851  	return s.getValue(false).getData()
   852  }
   853  
   854  func (s *sqlStat) getHistogramValues() []int64 {
   855  	return []int64{
   856  
   857  		s.Histogram_0_1,
   858  		s.Histogram_1_10,
   859  		s.Histogram_10_100,
   860  		s.Histogram_100_1000,
   861  		s.Histogram_1000_10000,
   862  		s.Histogram_10000_100000,
   863  		s.Histogram_100000_1000000,
   864  		s.Histogram_1000000_more,
   865  	}
   866  }
   867  
   868  func (s *sqlStat) getHistogramSum() int64 {
   869  	values := s.getHistogramValues()
   870  	var sum int64 = 0
   871  	for i := 0; i < len(values); i++ {
   872  		sum += values[i]
   873  	}
   874  	return sum
   875  }
   876  
   877  func (s *sqlStat) error(err error) {
   878  	atomic.AddInt64(&s.ExecuteErrorCount, 1)
   879  	s.ExecuteErrorLastTime = time.Now().UnixNano()
   880  	s.ExecuteErrorLast = err
   881  }
   882  
   883  func (s *sqlStat) getResultSetHoldTimeMilis() int64 {
   884  	return s.ResultSetHoldTimeNano / (1000 * 1000)
   885  }
   886  
   887  func (s *sqlStat) getExecuteAndResultSetHoldTimeMilis() int64 {
   888  	return s.ExecuteAndResultSetHoldTime / (1000 * 1000)
   889  }
   890  
   891  func (s *sqlStat) getFetchRowCountHistogramValues() []int64 {
   892  	return []int64{
   893  
   894  		s.FetchRowCount_0_1,
   895  		s.FetchRowCount_1_10,
   896  		s.FetchRowCount_10_100,
   897  		s.FetchRowCount_100_1000,
   898  		s.FetchRowCount_1000_10000,
   899  		s.FetchRowCount_10000_more,
   900  	}
   901  }
   902  
   903  func (s *sqlStat) getUpdateCountHistogramValues() []int64 {
   904  	return []int64{
   905  
   906  		s.UpdateCount_0_1,
   907  		s.UpdateCount_1_10,
   908  		s.UpdateCount_10_100,
   909  		s.UpdateCount_100_1000,
   910  		s.UpdateCount_1000_10000,
   911  		s.UpdateCount_10000_more,
   912  	}
   913  }
   914  
   915  func (s *sqlStat) getExecuteAndResultHoldTimeHistogramValues() []int64 {
   916  	return []int64{
   917  
   918  		s.ExecuteAndResultHoldTime_0_1,
   919  		s.ExecuteAndResultHoldTime_1_10,
   920  		s.ExecuteAndResultHoldTime_10_100,
   921  		s.ExecuteAndResultHoldTime_100_1000,
   922  		s.ExecuteAndResultHoldTime_1000_10000,
   923  		s.ExecuteAndResultHoldTime_10000_100000,
   924  		s.ExecuteAndResultHoldTime_100000_1000000,
   925  		s.ExecuteAndResultHoldTime_1000000_more,
   926  	}
   927  }
   928  
   929  func (s *sqlStat) getExecuteAndResultHoldTimeHistogramSum() int64 {
   930  	values := s.getExecuteAndResultHoldTimeHistogramValues()
   931  	var sum int64 = 0
   932  	for i := 0; i < len(values); i++ {
   933  		sum += values[i]
   934  	}
   935  	return sum
   936  }
   937  
   938  func (s *sqlStat) addResultSetHoldTimeNano(nano int64) {
   939  	atomic.AddInt64(&s.ResultSetHoldTimeNano, nano)
   940  }
   941  
   942  func (s *sqlStat) addResultSetHoldTimeNano2(statementExecuteNano int64, resultHoldTimeNano int64) {
   943  	atomic.AddInt64(&s.ResultSetHoldTimeNano, resultHoldTimeNano)
   944  	atomic.AddInt64(&s.ExecuteAndResultSetHoldTime, statementExecuteNano+resultHoldTimeNano)
	// executeAndResultHoldTimeHistogramRecord expects a span in nanoseconds
	// and converts to milliseconds itself, so pass the raw nanosecond sum.
	s.executeAndResultHoldTimeHistogramRecord(statementExecuteNano + resultHoldTimeNano)
   946  	atomic.AddInt64(&s.UpdateCount_0_1, 1)
   947  }
   948  
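// connectionStatValue is a snapshot of a connectionStat, produced by
// connectionStat.getValue and rendered by getData.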
   949  type connectionStatValue struct {
   950  	id string
   951  
   952  	url string
   953  
   954  	connCount int64
   955  
   956  	activeConnCount int64
   957  
   958  	maxActiveConnCount int64
   959  
   960  	executeCount int64
   961  
   962  	errorCount int64
   963  
   964  	stmtCount int64
   965  
   966  	activeStmtCount int64
   967  
   968  	maxActiveStmtCount int64
   969  
   970  	commitCount int64
   971  
   972  	rollbackCount int64
   973  
   974  	clobOpenCount int64
   975  
   976  	blobOpenCount int64
   977  
   978  	properties string
   979  }
   980  
   981  func newConnectionStatValue() *connectionStatValue {
   982  	csv := new(connectionStatValue)
   983  	return csv
   984  }
   985  
   986  func (csv *connectionStatValue) getData() map[string]interface{} {
   987  	m := make(map[string]interface{})
   988  	m[idConstStr] = csv.id
   989  	m[urlConstStr] = csv.url
   990  	m[connCountConstStr] = csv.connCount
   991  	m[activeConnCountConstStr] = csv.activeConnCount
   992  	m[maxActiveConnCountConstStr] = csv.maxActiveConnCount
   993  
   994  	m[stmtCountConstStr] = csv.stmtCount
   995  	m[activeStmtCountConstStr] = csv.activeStmtCount
   996  	m[maxActiveStmtCountConstStr] = csv.maxActiveStmtCount
   997  
   998  	m[executeCountConstStr] = csv.executeCount
   999  	m[errorCountConstStr] = csv.errorCount
  1000  	m[commitCountConstStr] = csv.commitCount
  1001  	m[rollbackCountConstStr] = csv.rollbackCount
  1002  
  1003  	m[clobOpenCountConstStr] = csv.clobOpenCount
  1004  	m[blobOpenCountConstStr] = csv.blobOpenCount
  1005  
  1006  	m[propertiesConstStr] = csv.properties
  1007  	return m
  1008  }
  1009  
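// connectionStat aggregates statistics for one data source ("host:port"):
// connection, statement and transaction counters updated with sync/atomic,
// plus a bounded per-SQL map of sqlStat entries guarded by lock.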
  1010  type connectionStat struct {
  1011  	id string
  1012  
  1013  	url string
  1014  
  1015  	connCount int64
  1016  
  1017  	activeConnCount int64
  1018  
  1019  	maxActiveConnCount int64
  1020  
  1021  	executeCount int64
  1022  
  1023  	errorCount int64
  1024  
  1025  	stmtCount int64
  1026  
  1027  	activeStmtCount int64
  1028  
  1029  	maxActiveStmtCount int64
  1030  
  1031  	commitCount int64
  1032  
  1033  	rollbackCount int64
  1034  
  1035  	clobOpenCount int64
  1036  
  1037  	blobOpenCount int64
  1038  
  1039  	sqlStatMap map[string]*sqlStat
  1040  
  1041  	maxSqlSize int
  1042  
  1043  	skipSqlCount int64
  1044  
  1045  	lock sync.RWMutex
  1046  
  1047  	properties string
  1048  }
  1049  
  1050  func newConnectionStat(url string) *connectionStat {
  1051  	cs := new(connectionStat)
  1052  	cs.maxSqlSize = StatSqlMaxCount
  1053  	cs.id = "DS" + generateId()
  1054  	cs.url = url
  1055  	cs.sqlStatMap = make(map[string]*sqlStat, 200)
  1056  	return cs
  1057  }
  1058  
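// createSqlStat returns the sqlStat registered for the given SQL text,
// creating and registering a new one under the lock when it is missing. It
// returns nil if the map is full and the removal mode rejects new entries.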
  1059  func (cs *connectionStat) createSqlStat(sql string) *sqlStat {
  1060  	cs.lock.Lock()
  1061  	defer cs.lock.Unlock()
	stat, ok := cs.sqlStatMap[sql]
	if !ok {
		// Avoid shadowing both the outer variable and the sqlStat type.
		stat = NewSqlStat(sql)
		stat.DataSource = cs.url
		stat.DataSourceId = cs.id
		if !cs.putSqlStat(stat) {
			return nil
		}
	}

	return stat
  1075  
  1076  }
  1077  
  1078  func (cs *connectionStat) putSqlStat(sqlStat *sqlStat) bool {
  1079  	if cs.maxSqlSize > 0 && len(cs.sqlStatMap) == cs.maxSqlSize {
  1080  		if StatSqlRemoveMode == STAT_SQL_REMOVE_OLDEST {
  1081  			removeSqlStat := cs.eliminateSqlStat()
  1082  			if removeSqlStat.RunningCount > 0 || removeSqlStat.getExecuteCount() > 0 {
  1083  				atomic.AddInt64(&cs.skipSqlCount, 1)
  1084  			}
  1085  			cs.sqlStatMap[sqlStat.Sql] = sqlStat
  1086  			return true
  1087  		} else {
  1088  			if sqlStat.RunningCount > 0 || sqlStat.getExecuteCount() > 0 {
  1089  				atomic.AddInt64(&cs.skipSqlCount, 1)
  1090  			}
  1091  			return false
  1092  		}
  1093  	} else {
  1094  		cs.sqlStatMap[sqlStat.Sql] = sqlStat
  1095  		return true
  1096  	}
  1097  }
  1098  
  1099  func (cs *connectionStat) eliminateSqlStat() *sqlStat {
  1100  	if cs.maxSqlSize > 0 && len(cs.sqlStatMap) == cs.maxSqlSize {
  1101  		if StatSqlRemoveMode == STAT_SQL_REMOVE_OLDEST {
  1102  			for s, item := range cs.sqlStatMap {
  1103  				if item != nil {
  1104  					delete(cs.sqlStatMap, s)
  1105  					return item
  1106  				}
  1107  			}
  1108  		}
  1109  	}
  1110  	return nil
  1111  }
  1112  
  1113  func (cs *connectionStat) getSqlStatMap() map[string]*sqlStat {
  1114  	m := make(map[string]*sqlStat, len(cs.sqlStatMap))
  1115  	cs.lock.Lock()
  1116  	defer cs.lock.Unlock()
  1117  	for s, item := range cs.sqlStatMap {
  1118  		m[s] = item
  1119  	}
  1120  	return m
  1121  }
  1122  
  1123  func (cs *connectionStat) getSqlStatMapAndReset() []*SqlStatValue {
  1124  	stats := make([]*sqlStat, 0, len(cs.sqlStatMap))
  1125  	cs.lock.Lock()
  1126  	defer cs.lock.Unlock()
  1127  
  1128  	for s, stat := range cs.sqlStatMap {
  1129  
  1130  		if stat.getExecuteCount() == 0 && stat.RunningCount == 0 {
  1131  			stat.Removed = 1
  1132  			delete(cs.sqlStatMap, s)
  1133  		} else {
  1134  			stats = append(stats, stat)
  1135  		}
  1136  	}
  1137  
  1138  	values := make([]*SqlStatValue, 0, len(stats))
  1139  	for _, stat := range stats {
  1140  		value := stat.getValueAndReset()
  1141  		if value.getExecuteCount() == 0 && value.runningCount == 0 {
  1142  			continue
  1143  		}
  1144  		values = append(values, value)
  1145  	}
  1146  	return values
  1147  }
  1148  
  1149  func (cs *connectionStat) incrementConn() {
  1150  	atomic.AddInt64(&cs.connCount, 1)
  1151  	atomic.AddInt64(&cs.activeConnCount, 1)
  1152  	count := atomic.LoadInt64(&cs.activeConnCount)
  1153  	if count > atomic.LoadInt64(&cs.maxActiveConnCount) {
  1154  		atomic.StoreInt64(&cs.maxActiveConnCount, count)
  1155  	}
  1156  }
  1157  
  1158  func (cs *connectionStat) decrementConn() {
  1159  	atomic.AddInt64(&cs.activeConnCount, -1)
  1160  }
  1161  
  1162  func (cs *connectionStat) incrementStmt() {
  1163  	atomic.AddInt64(&cs.stmtCount, 1)
  1164  	atomic.AddInt64(&cs.activeStmtCount, 1)
  1165  	count := atomic.LoadInt64(&cs.activeStmtCount)
  1166  	if count > atomic.LoadInt64(&cs.maxActiveStmtCount) {
  1167  		atomic.StoreInt64(&cs.maxActiveStmtCount, count)
  1168  	}
  1169  }
  1170  
  1171  func (cs *connectionStat) decrementStmt() {
  1172  	atomic.AddInt64(&cs.activeStmtCount, -1)
  1173  }
  1174  
  1175  func (cs *connectionStat) decrementStmtByActiveStmtCount(activeStmtCount int64) {
  1176  	atomic.AddInt64(&cs.activeStmtCount, -activeStmtCount)
  1177  }
  1178  
  1179  func (cs *connectionStat) incrementExecuteCount() {
  1180  	atomic.AddInt64(&cs.executeCount, 1)
  1181  }
  1182  
  1183  func (cs *connectionStat) incrementErrorCount() {
  1184  	atomic.AddInt64(&cs.errorCount, 1)
  1185  }
  1186  
  1187  func (cs *connectionStat) incrementCommitCount() {
  1188  	atomic.AddInt64(&cs.commitCount, 1)
  1189  }
  1190  
  1191  func (cs *connectionStat) incrementRollbackCount() {
  1192  	atomic.AddInt64(&cs.rollbackCount, 1)
  1193  }
  1194  
  1195  func (cs *connectionStat) getValue(reset bool) *connectionStatValue {
  1196  	val := newConnectionStatValue()
  1197  	val.id = cs.id
  1198  	val.url = cs.url
  1199  
  1200  	val.connCount = getInt64(&cs.connCount, reset)
  1201  	val.activeConnCount = getInt64(&cs.activeConnCount, false)
  1202  	val.maxActiveConnCount = getInt64(&cs.maxActiveConnCount, false)
  1203  
  1204  	val.stmtCount = getInt64(&cs.stmtCount, reset)
  1205  	val.activeStmtCount = getInt64(&cs.activeStmtCount, false)
  1206  	val.maxActiveStmtCount = getInt64(&cs.maxActiveStmtCount, false)
  1207  
  1208  	val.commitCount = getInt64(&cs.commitCount, reset)
  1209  	val.rollbackCount = getInt64(&cs.rollbackCount, reset)
  1210  	val.executeCount = getInt64(&cs.executeCount, reset)
  1211  	val.errorCount = getInt64(&cs.errorCount, reset)
  1212  
  1213  	val.blobOpenCount = getInt64(&cs.blobOpenCount, reset)
  1214  	val.clobOpenCount = getInt64(&cs.clobOpenCount, reset)
  1215  
  1216  	val.properties = cs.properties
  1217  	return val
  1218  }
  1219  
  1220  func (cs *connectionStat) getData() map[string]interface{} {
  1221  	return cs.getValue(false).getData()
  1222  }
  1223  
  1224  func (cs *connectionStat) getValueAndReset() *connectionStatValue {
  1225  	return cs.getValue(true)
  1226  }
  1227  
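// GoStat is the driver-wide statistics registry: it maps a data source url
// ("host:port") to its connectionStat, with maxConnSize as the intended upper
// bound on tracked data sources.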
  1228  type GoStat struct {
  1229  	connStatMap map[string]*connectionStat
  1230  
  1231  	lock sync.RWMutex
  1232  
  1233  	maxConnSize int
  1234  
  1235  	skipConnCount int64
  1236  }
  1237  
  1238  func newGoStat(maxConnSize int) *GoStat {
  1239  	gs := new(GoStat)
  1240  	if maxConnSize > 0 {
  1241  		gs.maxConnSize = maxConnSize
  1242  	} else {
  1243  		gs.maxConnSize = 1000
  1244  	}
  1245  
  1246  	gs.connStatMap = make(map[string]*connectionStat, 16)
  1247  	return gs
  1248  }
  1249  
  1250  func (gs *GoStat) createConnStat(conn *DmConnection) *connectionStat {
  1251  	url := conn.dmConnector.host + ":" + strconv.Itoa(int(conn.dmConnector.port))
  1252  	gs.lock.Lock()
  1253  	defer gs.lock.Unlock()
  1254  	connstat, ok := gs.connStatMap[url]
  1255  	if !ok {
  1256  		connstat = newConnectionStat(url)
  1257  
  1258  		remove := len(gs.connStatMap) > gs.maxConnSize
  1259  		if remove && connstat.activeConnCount > 0 {
  1260  			atomic.AddInt64(&gs.skipConnCount, 1)
  1261  		}
  1262  
  1263  		gs.connStatMap[url] = connstat
  1264  	}
  1265  
  1266  	return connstat
  1267  }
  1268  
  1269  func (gs *GoStat) getConnStatMap() map[string]*connectionStat {
  1270  	m := make(map[string]*connectionStat, len(gs.connStatMap))
  1271  	gs.lock.Lock()
  1272  	defer gs.lock.Unlock()
  1273  
  1274  	for s, stat := range gs.connStatMap {
  1275  		m[s] = stat
  1276  	}
  1277  	return m
  1278  }
  1279  
  1280  var sqlRowField = []string{rowNumConstStr, dataSourceConstStr, sqlConstStr, executeCountConstStr,
  1281  	totalTimeConstStr, maxTimespanConstStr, inTransactionCountConstStr, errorCountConstStr, effectedRowCountConstStr,
  1282  	fetchRowCountConstStr, runningCountConstStr, concurrentMaxConstStr, executeHoldTimeHistogramConstStr,
  1283  	executeAndResultHoldTimeHistogramConstStr, fetchRowCountHistogramConstStr, effectedRowCountHistogramConstStr}
  1284  
  1285  var sqlColField = []string{"ID", "DataSource", "SQL", "ExecuteCount",
  1286  	"ErrorCount", "TotalTime", "LastTime", "MaxTimespan", "LastError", "EffectedRowCount",
  1287  	"FetchRowCount", "MaxTimespanOccurTime", "BatchSizeMax", "BatchSizeTotal", "ConcurrentMax",
  1288  	"RunningCount", "Name", "File", "LastErrorMessage", "LastErrorClass", "LastErrorStackTrace",
  1289  	"LastErrorTime", "DbType", "URL", "InTransactionCount", "Histogram", "LastSlowParameters",
  1290  	"ResultSetHoldTime", "ExecuteAndResultSetHoldTime", "FetchRowCountHistogram",
  1291  	"EffectedRowCountHistogram", "ExecuteAndResultHoldTimeHistogram", "EffectedRowCountMax",
  1292  	"FetchRowCountMax", "ClobOpenCount"}
  1293  
  1294  const (
  1295  	rowNumConstStr                            = "rowNum"
  1296  	idConstStr                                = "ID"
  1297  	urlConstStr                               = "Url"
  1298  	connCountConstStr                         = "ConnCount"
  1299  	activeConnCountConstStr                   = "ActiveConnCount"
  1300  	maxActiveConnCountConstStr                = "MaxActiveConnCount"
  1301  	stmtCountConstStr                         = "StmtCount"
  1302  	activeStmtCountConstStr                   = "ActiveStmtCount"
  1303  	maxActiveStmtCountConstStr                = "MaxActiveStmtCount"
  1304  	executeCountConstStr                      = "ExecuteCount"
  1305  	errorCountConstStr                        = "ErrorCount"
  1306  	commitCountConstStr                       = "CommitCount"
  1307  	rollbackCountConstStr                     = "RollbackCount"
  1308  	clobOpenCountConstStr                     = "ClobOpenCount"
  1309  	blobOpenCountConstStr                     = "BlobOpenCount"
  1310  	propertiesConstStr                        = "Properties"
  1311  	dataSourceConstStr                        = "DataSource"
  1312  	sqlConstStr                               = "SQL"
  1313  	totalTimeConstStr                         = "TotalTime"
  1314  	maxTimespanConstStr                       = "MaxTimespan"
  1315  	inTransactionCountConstStr                = "InTransactionCount"
  1316  	effectedRowCountConstStr                  = "EffectedRowCount"
  1317  	fetchRowCountConstStr                     = "FetchRowCount"
  1318  	runningCountConstStr                      = "RunningCount"
  1319  	concurrentMaxConstStr                     = "ConcurrentMax"
  1320  	executeHoldTimeHistogramConstStr          = "ExecuteHoldTimeHistogram"
  1321  	executeAndResultHoldTimeHistogramConstStr = "ExecuteAndResultHoldTimeHistogram"
  1322  	fetchRowCountHistogramConstStr            = "FetchRowCountHistogram"
  1323  	effectedRowCountHistogramConstStr         = "EffectedRowCountHistogram"
  1324  )
  1325  
  1326  var dsRowField = []string{rowNumConstStr, urlConstStr, activeConnCountConstStr,
  1327  	maxActiveConnCountConstStr, activeStmtCountConstStr, maxActiveStmtCountConstStr, executeCountConstStr, errorCountConstStr,
  1328  	commitCountConstStr, rollbackCountConstStr}
  1329  
  1330  var dsColField = []string{"ID", "ConnCount", "ActiveConnCount",
  1331  	"MaxActiveConnCount", "StmtCount", "ActiveStmtCount", "MaxActiveStmtCount", "ExecuteCount",
  1332  	"ErrorCount", "CommitCount", "RollbackCount", "ClobOpenCount", "BlobOpenCount"}
  1333  
  1334  const (
  1335  	PROP_NAME_SORT            = "sort"
  1336  	PROP_NAME_SORT_FIELD      = "field"
  1337  	PROP_NAME_SORT_TYPE       = "direction"
  1338  	PROP_NAME_SEARCH          = "search"
  1339  	PROP_NAME_PAGE_NUM        = "pageNum"
  1340  	PROP_NAME_PAGE_SIZE       = "pageSize"
  1341  	PROP_NAME_PAGE_COUNT      = "pageCount"
  1342  	PROP_NAME_TOTAL_ROW_COUNT = "totalRowCount"
  1343  	PROP_NAME_FLUSH_FREQ      = "flushFreq"
  1344  	PROP_NAME_DATASOURCE_ID   = "dataSourceId"
  1345  	PROP_NAME_SQL_ID          = "sqlId"
  1346  
  1347  	URL_SQL               = "sql"
  1348  	URL_SQL_DETAIL        = "sqlDetail"
  1349  	URL_DATASOURCE        = "dataSource"
  1350  	URL_DATASOURCE_DETAIL = "dataSourceDetail"
  1351  
  1352  	RESULT_CODE_SUCCESS = 1
  1353  	RESULT_CODE_ERROR   = -1
  1354  	DEFAULT_PAGE_NUM    = 1
  1355  	DEFAULT_PAGE_SIZE   = int(INT32_MAX)
  1356  	DEFAULT_ORDER_TYPE  = "asc"
  1357  	DEFAULT_ORDERBY     = "DataSourceId"
  1358  )
  1359  
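// StatReader renders the collected statistics as formatted text tables:
// connection stats, high-frequency SQL and slow SQL. Each read* method emits
// at most maxCount rows per call, keeps the remainder for the next call, and
// returns true while more rows are pending. Illustrative paging loop with
// hypothetical values:
//
//	sr := newStatReader()
//	var out []string
//	more := true
//	for more {
//		more, out = sr.readSlowSqlStat(out, 50)
//	}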
  1360  type StatReader struct {
  1361  	connStat []map[string]interface{}
  1362  
  1363  	connStatColLens []int
  1364  
  1365  	highFreqSqlStat []map[string]interface{}
  1366  
  1367  	highFreqSqlStatColLens []int
  1368  
  1369  	slowSqlStat []map[string]interface{}
  1370  
  1371  	slowSqlStatColLens []int
  1372  }
  1373  
  1374  func newStatReader() *StatReader {
  1375  	sr := new(StatReader)
  1376  	return sr
  1377  }
  1378  
  1379  func (sr *StatReader) readConnStat(retList []string, maxCount int) (bool, []string) {
  1380  	fields := dsRowField
  1381  	isAppend := false
  1382  	if sr.connStat == nil {
  1383  		sr.connStat = sr.getConnStat("", fields)
  1384  		sr.connStatColLens = calcColLens(sr.connStat, fields, COL_MAX_LEN)
  1385  		isAppend = false
  1386  	} else {
  1387  		isAppend = true
  1388  	}
  1389  	var retContent []map[string]interface{}
  1390  	if maxCount > 0 && len(sr.connStat) > maxCount {
  1391  		retContent = sr.connStat[0:maxCount]
  1392  		sr.connStat = sr.connStat[maxCount:len(sr.connStat)]
  1393  	} else {
  1394  		retContent = sr.connStat
  1395  		sr.connStat = nil
  1396  	}
  1397  	retList = append(retList, sr.getFormattedOutput(retContent, fields, sr.connStatColLens, isAppend))
  1398  	return sr.connStat != nil, retList
  1399  }
  1400  
  1401  func (sr *StatReader) readHighFreqSqlStat(retList []string, maxCount int) (bool, []string) {
  1402  	isAppend := false
  1403  	if sr.highFreqSqlStat == nil {
  1404  		sr.highFreqSqlStat = sr.getHighFreqSqlStat(StatHighFreqSqlCount, -1, sqlRowField)
  1405  		sr.highFreqSqlStatColLens = calcColLens(sr.highFreqSqlStat, sqlRowField, COL_MAX_LEN)
  1406  		isAppend = false
  1407  	} else {
  1408  		isAppend = true
  1409  	}
  1410  	var retContent []map[string]interface{}
  1411  	if maxCount > 0 && len(sr.highFreqSqlStat) > maxCount {
  1412  		retContent = sr.highFreqSqlStat[0:maxCount]
  1413  		sr.highFreqSqlStat = sr.highFreqSqlStat[maxCount:len(sr.highFreqSqlStat)]
  1414  	} else {
  1415  		retContent = sr.highFreqSqlStat
  1416  		sr.highFreqSqlStat = nil
  1417  	}
  1418  	retList = append(retList, sr.getFormattedOutput(retContent, sqlRowField, sr.highFreqSqlStatColLens, isAppend))
  1419  	return sr.highFreqSqlStat != nil, retList
  1420  }
  1421  
  1422  func (sr *StatReader) getHighFreqSqlStat(topCount int, sqlId int,
  1423  	fields []string) []map[string]interface{} {
  1424  	var content []map[string]interface{}
  1425  
  1426  	if topCount != 0 {
  1427  		parameters := NewProperties()
  1428  		parameters.Set(PROP_NAME_SORT_FIELD, "ExecuteCount")
  1429  		parameters.Set(PROP_NAME_SORT_TYPE, "desc")
  1430  		parameters.Set(PROP_NAME_PAGE_NUM, "1")
  1431  		parameters.Set(PROP_NAME_PAGE_SIZE, strconv.Itoa(topCount))
  1432  		content = sr.service(URL_SQL, parameters)
  1433  		if sqlId != -1 {
  1434  			matchedContent := make([]map[string]interface{}, 0)
  1435  			for _, sqlStat := range content {
  1436  				idStr := sqlStat["ID"]
  1437  				if idStr == sqlId {
  1438  					matchedContent = append(matchedContent, sqlStat)
  1439  					break
  1440  				}
  1441  			}
  1442  			content = matchedContent
  1443  		}
  1444  	}
  1445  
  1446  	if content == nil {
  1447  		content = make([]map[string]interface{}, 0)
  1448  	} else {
  1449  		i := 1
  1450  		for _, m := range content {
  1451  			m[rowNumConstStr] = i
  1452  			i++
  1453  		}
  1454  	}
  1455  	content = addTitles(content, fields)
  1456  	return content
  1457  }
  1458  
  1459  func (sr *StatReader) readSlowSqlStat(retList []string, maxCount int) (bool, []string) {
  1460  	isAppend := false
  1461  	if sr.slowSqlStat == nil {
  1462  		sr.slowSqlStat = sr.getSlowSqlStat(StatSlowSqlCount, -1, sqlRowField)
  1463  		sr.slowSqlStatColLens = calcColLens(sr.slowSqlStat, sqlRowField,
  1464  			COL_MAX_LEN)
  1465  		isAppend = false
  1466  	} else {
  1467  		isAppend = true
  1468  	}
  1469  	var retContent []map[string]interface{}
  1470  	if maxCount > 0 && len(sr.slowSqlStat) > maxCount {
  1471  		retContent = sr.slowSqlStat[0:maxCount]
  1472  		sr.slowSqlStat = sr.slowSqlStat[maxCount:len(sr.slowSqlStat)]
  1473  	} else {
  1474  		retContent = sr.slowSqlStat
  1475  		sr.slowSqlStat = nil
  1476  	}
  1477  	retList = append(retList, sr.getFormattedOutput(retContent, sqlRowField, sr.slowSqlStatColLens, isAppend))
  1478  	return sr.slowSqlStat != nil, retList
  1479  }
  1480  
  1481  func (sr *StatReader) getSlowSqlStat(topCount int, sqlId int, fields []string) []map[string]interface{} {
  1482  	var content []map[string]interface{}
  1483  
  1484  	if topCount != 0 {
  1485  		parameters := NewProperties()
  1486  		parameters.Set(PROP_NAME_SORT_FIELD, "MaxTimespan")
  1487  		parameters.Set(PROP_NAME_SORT_TYPE, "desc")
  1488  		parameters.Set(PROP_NAME_PAGE_NUM, "1")
  1489  		parameters.Set(PROP_NAME_PAGE_SIZE, strconv.Itoa(topCount))
  1490  
  1491  		content = sr.service(URL_SQL, parameters)
  1492  		if sqlId != -1 {
  1493  			matchedContent := make([]map[string]interface{}, 0)
  1494  			for _, sqlStat := range content {
  1495  				idStr := sqlStat["ID"]
  1496  				if idStr == sqlId {
  1497  					matchedContent = append(matchedContent, sqlStat)
  1498  					break
  1499  				}
  1500  			}
  1501  			content = matchedContent
  1502  		}
  1503  	}
  1504  
  1505  	if content == nil {
  1506  		content = make([]map[string]interface{}, 0)
  1507  	} else {
  1508  		i := 1
  1509  		for _, m := range content {
  1510  			m["rowNum"] = i
  1511  			i++
  1512  		}
  1513  	}
  1514  	content = addTitles(content, fields)
  1515  	return content
  1516  }
  1517  
  1518  func (sr *StatReader) getConnStat(connId string, fields []string) []map[string]interface{} {
  1519  	content := sr.service(URL_DATASOURCE, nil)
  1520  	if connId != "" {
  1521  		matchedContent := make([]map[string]interface{}, 0)
  1522  		for _, dsStat := range content {
			// The stat maps store the data source id under idConstStr ("ID"), not "Identity".
			idStr := dsStat[idConstStr]
  1524  			if connId == idStr {
  1525  				matchedContent = append(matchedContent, dsStat)
  1526  				break
  1527  			}
  1528  		}
  1529  		content = matchedContent
  1530  	}
  1531  	if content == nil {
  1532  		content = make([]map[string]interface{}, 0)
  1533  	} else {
  1534  		i := 1
  1535  		for _, m := range content {
  1536  			m["rowNum"] = i
  1537  			i++
  1538  		}
  1539  	}
  1540  	content = addTitles(content, fields)
  1541  	return content
  1542  }
  1543  
  1544  func (sr *StatReader) getFormattedOutput(content []map[string]interface{}, fields []string, colLens []int,
  1545  	isAppend bool) string {
  1546  	return toTable(content, fields, colLens, true, isAppend)
  1547  }
  1548  
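// parseUrl extracts the query string after '?' from a stat URL such as
// "sqlDetail?dataSourceId=DS1&sqlId=SQL1" (values here are illustrative) and
// returns the name/value pairs as Properties.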
  1549  func (sr *StatReader) parseUrl(url string) *Properties {
  1550  	parameters := NewProperties()
  1551  
	if strings.TrimSpace(url) == "" {
  1553  		return parameters
  1554  	}
  1555  
  1556  	parametersStr := util.StringUtil.SubstringBetween(url, "?", "")
	if parametersStr == "" {
  1558  		return parameters
  1559  	}
  1560  
  1561  	parametersArray := strings.Split(parametersStr, "&")
  1562  
  1563  	for _, parameterStr := range parametersArray {
		// Split each individual "name=value" pair, not the whole query string.
		index := strings.Index(parameterStr, "=")
  1565  		if index <= 0 {
  1566  			continue
  1567  		}
  1568  
  1569  		name := parameterStr[0:index]
  1570  		value := parameterStr[index+1:]
  1571  		parameters.Set(name, value)
  1572  	}
  1573  	return parameters
  1574  }
  1575  
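// service dispatches a stat URL (sql, sqlDetail, dataSource,
// dataSourceDetail) to the matching list builder; for the two list views it
// also applies the requested ordering and records the flush frequency.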
  1576  func (sr *StatReader) service(url string, params *Properties) []map[string]interface{} {
  1577  	if params != nil {
  1578  		params.SetProperties(sr.parseUrl(url))
  1579  	} else {
  1580  		params = sr.parseUrl(url)
  1581  	}
  1582  
	// URL_SQL is a prefix of URL_SQL_DETAIL and URL_DATASOURCE a prefix of
	// URL_DATASOURCE_DETAIL, so the more specific detail paths must be
	// matched first or their branches can never be reached.
	if strings.HasPrefix(url, URL_SQL_DETAIL) {
		return sr.getSqlStatDetailList(params)
	} else if strings.HasPrefix(url, URL_SQL) {
		array := sr.getSqlStatList(params)
		array = sr.comparatorOrderBy(array, params)
		params.Set(PROP_NAME_FLUSH_FREQ, strconv.Itoa(StatFlushFreq))
		return array
	} else if strings.HasPrefix(url, URL_DATASOURCE_DETAIL) {
		return sr.getConnStatDetailList(params)
	} else if strings.HasPrefix(url, URL_DATASOURCE) {
		array := sr.getConnStatList(params)
		array = sr.comparatorOrderBy(array, params)
		params.Set(PROP_NAME_FLUSH_FREQ, strconv.Itoa(StatFlushFreq))
		return array
	} else {
		return nil
	}
  1602  }
  1603  
  1604  func (sr *StatReader) getSqlStatList(params *Properties) []map[string]interface{} {
  1605  	array := make([]map[string]interface{}, 0)
  1606  	connStatMap := goStat.getConnStatMap()
  1607  	var sqlStatMap map[string]*sqlStat
  1608  	for _, connStat := range connStatMap {
  1609  		sqlStatMap = connStat.getSqlStatMap()
  1610  		for _, sqlStat := range sqlStatMap {
  1611  			data := sqlStat.getData()
			// The map stores int64 values; compare as int64 so the untyped 0
			// (boxed as int inside an interface) does not make the test always false.
			executeCount, _ := data[executeCountConstStr].(int64)
			runningCount, _ := data[runningCountConstStr].(int64)
			if executeCount == 0 && runningCount == 0 {
  1615  				continue
  1616  			}
  1617  
  1618  			array = append(array, data)
  1619  		}
  1620  	}
  1621  
  1622  	return array
  1623  }
  1624  
  1625  func (sr *StatReader) getSqlStatDetailList(params *Properties) []map[string]interface{} {
  1626  	array := make([]map[string]interface{}, 0)
  1627  	connStatMap := goStat.getConnStatMap()
  1628  	var data *sqlStat
  1629  	sqlId := ""
  1630  	dsId := ""
  1631  	if v := params.GetString(PROP_NAME_SQL_ID, ""); v != "" {
  1632  		sqlId = v
  1633  	}
  1634  	if v := params.GetString(PROP_NAME_DATASOURCE_ID, ""); v != "" {
  1635  		dsId = v
  1636  	}
  1637  	if sqlId != "" && dsId != "" {
  1638  		for _, connStat := range connStatMap {
  1639  			if dsId != connStat.id {
  1640  				continue
  1641  			} else {
  1642  				sqlStatMap := connStat.getSqlStatMap()
  1643  				for _, sqlStat := range sqlStatMap {
  1644  
  1645  					if sqlId == sqlStat.Id {
  1646  						data = sqlStat
  1647  						break
  1648  					}
  1649  				}
  1650  			}
  1651  			break
  1652  		}
  1653  	}
  1654  	if data != nil {
  1655  
  1656  		array = append(array, data.getData())
  1657  
  1658  	}
  1659  	return array
  1660  }
  1661  
  1662  func (sr *StatReader) getConnStatList(params *Properties) []map[string]interface{} {
  1663  	array := make([]map[string]interface{}, 0)
  1664  	connStatMap := goStat.getConnStatMap()
  1665  	id := ""
  1666  	if v := params.GetString(PROP_NAME_DATASOURCE_ID, ""); v != "" {
  1667  		id = v
  1668  	}
  1669  	for _, connStat := range connStatMap {
  1670  		data := connStat.getData()
  1671  
		// ConnCount is stored as int64; assert the type so the comparison with
		// the untyped constant 0 behaves as intended.
		connCount, _ := data[connCountConstStr].(int64)

		if connCount == 0 {
  1675  			continue
  1676  		}
  1677  
  1678  		if id != "" {
  1679  			if id == connStat.id {
  1680  				array = append(array, data)
  1681  				break
  1682  			} else {
  1683  				continue
  1684  			}
  1685  		} else {
  1686  
  1687  			array = append(array, data)
  1688  		}
  1689  
  1690  	}
  1691  	return array
  1692  }
  1693  
  1694  func (sr *StatReader) getConnStatDetailList(params *Properties) []map[string]interface{} {
  1695  	array := make([]map[string]interface{}, 0)
  1696  	var data *connectionStat
  1697  	connStatMap := goStat.getConnStatMap()
  1698  	id := ""
  1699  	if v := params.GetString(PROP_NAME_DATASOURCE_ID, ""); v != "" {
  1700  		id = v
  1701  	}
  1702  	if id != "" {
  1703  		for _, connStat := range connStatMap {
  1704  			if id == connStat.id {
  1705  				data = connStat
  1706  				break
  1707  			}
  1708  		}
  1709  	}
  1710  	if data != nil {
  1711  		dataValue := data.getValue(false)
  1712  		m := make(map[string]interface{}, 2)
  1713  		m["name"] = "数据源" // data source (connection URL)
  1714  		m["value"] = dataValue.url
  1715  		array = append(array, m)
  1716  
  1717  		m = make(map[string]interface{}, 2)
  1718  		m["name"] = "总会话数" // total session count
  1719  		m["value"] = dataValue.connCount
  1720  		array = append(array, m)
  1721  
  1722  		m = make(map[string]interface{}, 2)
  1723  		m["name"] = "活动会话数" // active session count
  1724  		m["value"] = dataValue.activeConnCount
  1725  		array = append(array, m)
  1726  
  1727  		m = make(map[string]interface{}, 2)
  1728  		m["name"] = "活动会话数峰值" // peak active session count
  1729  		m["value"] = dataValue.maxActiveStmtCount
  1730  		array = append(array, m)
  1731  
  1732  		m = make(map[string]interface{}, 2)
  1733  		m["name"] = "总句柄数" // total statement-handle count
  1734  		m["value"] = dataValue.stmtCount
  1735  		array = append(array, m)
  1736  
  1737  		m = make(map[string]interface{}, 2)
  1738  		m["name"] = "活动句柄数" // active statement-handle count
  1739  		m["value"] = dataValue.activeStmtCount
  1740  		array = append(array, m)
  1741  
  1742  		m = make(map[string]interface{}, 2)
  1743  		m["name"] = "活动句柄数峰值" // peak active statement-handle count
  1744  		m["value"] = dataValue.maxActiveStmtCount
  1745  		array = append(array, m)
  1746  
  1747  		m = make(map[string]interface{}, 2)
  1748  		m["name"] = "执行次数" // execute count
  1749  		m["value"] = dataValue.executeCount
  1750  		array = append(array, m)
  1751  
  1752  		m = make(map[string]interface{}, 2)
  1753  		m["name"] = "执行出错次数" // execute error count
  1754  		m["value"] = dataValue.errorCount
  1755  		array = append(array, m)
  1756  
  1757  		m = make(map[string]interface{}, 2)
  1758  		m["name"] = "提交次数" // commit count
  1759  		m["value"] = dataValue.commitCount
  1760  		array = append(array, m)
  1761  
  1762  		m = make(map[string]interface{}, 2)
  1763  		m["name"] = "回滚次数" // rollback count
  1764  		m["value"] = dataValue.rollbackCount
  1765  		array = append(array, m)
  1766  
  1767  	}
  1768  	return array
  1769  }
  1770  
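        // mapSlice adapts a slice of stat rows to sort.Interface so rows can be ordered
        // by a single key, ascending or descending. A minimal usage sketch (the key
        // name "ExecuteCount" is only an illustration, not a key guaranteed by getData):
        //
        //	sort.Sort(newMapSlice(rows, true, "ExecuteCount"))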
  1771  type mapSlice struct {
  1772  	m          []map[string]interface{}
  1773  	isDesc     bool
  1774  	orderByKey string
  1775  }
  1776  
  1777  func newMapSlice(m []map[string]interface{}, isDesc bool, orderByKey string) *mapSlice {
  1778  	ms := new(mapSlice)
  1779  	ms.m = m
  1780  	ms.isDesc = isDesc
  1781  	ms.orderByKey = orderByKey
  1782  	return ms
  1783  }
  1784  
  1785  func (ms mapSlice) Len() int { return len(ms.m) }
  1786  
  1787  func (ms mapSlice) Less(i, j int) bool {
  1788  	m1 := ms.m[i]
  1789  	m2 := ms.m[j]
  1790  	v1 := m1[ms.orderByKey]
  1791  	v2 := m2[ms.orderByKey]
  1792  	if v1 == nil {
  1793  		return true
  1794  	} else if v2 == nil {
  1795  		return false
  1796  	}
  1797  
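        	// XOR the ascending comparison with isDesc so a descending order type
        	// reverses the sort; both values are assumed to share one numeric type.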
  1798  	switch v1.(type) {
  1799  	case int64:
  1800  		return (v1.(int64) < v2.(int64)) != ms.isDesc
  1801  	case float64:
  1802  		return (v1.(float64) < v2.(float64)) != ms.isDesc
  1803  	default:
  1804  		return true
  1805  	}
  1806  }
  1807  
  1808  func (ms mapSlice) Swap(i, j int) {
  1809  	ms.m[i], ms.m[j] = ms.m[j], ms.m[i]
  1810  }
  1811  
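        // comparatorOrderBy sorts the rows by the requested field and order type, then
        // slices out the requested page and writes the resulting page info back into
        // params. Defaults apply when the paging parameters are missing or invalid.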
  1812  func (sr *StatReader) comparatorOrderBy(array []map[string]interface{}, params *Properties) []map[string]interface{} {
  1813  	if array == nil {
  1814  		array = make([]map[string]interface{}, 0)
  1815  	}
  1816  
  1817  	orderBy := DEFAULT_ORDERBY
  1818  	orderType := DEFAULT_ORDER_TYPE
  1819  	pageNum := DEFAULT_PAGE_NUM
  1820  	pageSize := DEFAULT_PAGE_SIZE
  1821  	if params != nil {
  1822  		if v := params.GetTrimString(PROP_NAME_SORT_FIELD, ""); v != "" {
  1823  			orderBy = v
  1824  		}
  1825  
  1826  		if v := params.GetTrimString(PROP_NAME_SORT_TYPE, ""); v != "" {
  1827  			orderType = v
  1828  		}
  1829  
  1830  		if v := params.GetTrimString(PROP_NAME_PAGE_NUM, ""); v != "" {
  1831  			var err error
  1832  			pageNum, err = strconv.Atoi(v)
  1833  			if err != nil || pageNum < 1 {
  1834  				pageNum = DEFAULT_PAGE_NUM
  1835  			}
  1836  		}
  1837  		if v := params.GetTrimString(PROP_NAME_PAGE_SIZE, ""); v != "" {
  1838  			var err error
  1839  			pageSize, err = strconv.Atoi(v)
  1840  			if err != nil || pageSize < 1 {
  1841  				pageSize = DEFAULT_PAGE_SIZE
  1842  			}
  1843  		}
  1844  	}
  1845  
  1846  	rowCount := len(array)
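        	// round the page count up so a partial final page still counts as a page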
  1847  	pageCount := int(math.Ceil(float64(rowCount) / float64(pageSize)))
  1848  	if pageCount < 1 {
  1849  		pageCount = 1
  1850  	}
  1851  
  1852  	if pageNum > pageCount {
  1853  		pageNum = pageCount
  1854  	}
  1855  
  1856  	if len(array) > 0 {
  1857  
  1858  		if orderBy != "" {
  1859  			sort.Sort(newMapSlice(array, orderType != DEFAULT_ORDER_TYPE, orderBy))
  1860  		}
  1861  
  1862  		fromIndex := (pageNum - 1) * pageSize
  1863  
  1864  		toIndex := pageNum * pageSize
  1865  		if toIndex > rowCount {
  1866  			toIndex = rowCount
  1867  		}
  1868  		array = array[fromIndex:toIndex]
  1869  	}
  1870  	sr.resetPageInfo(params, rowCount, pageCount, pageNum)
  1871  	return array
  1872  }
  1873  
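        // resetPageInfo writes the computed page count, total row count and page number
        // back into params (only when a page size was supplied) so the caller can
        // render pagination.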
  1874  func (sr *StatReader) resetPageInfo(params *Properties, rowCount int, pageCount int, pageNum int) {
  1875  
  1876  	if params != nil {
  1877  		v := params.GetString(PROP_NAME_PAGE_SIZE, "")
  1878  		if v != "" {
  1879  
  1880  			params.Set(PROP_NAME_PAGE_COUNT, strconv.Itoa(pageCount))
  1881  			params.Set(PROP_NAME_TOTAL_ROW_COUNT, strconv.Itoa(rowCount))
  1882  			params.Set(PROP_NAME_PAGE_NUM, strconv.Itoa(pageNum))
  1883  		}
  1884  	}
  1885  }
  1886  
  1887  const COL_MAX_LEN = 32
  1888  
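        // calcColLens computes one column width per field as the longest rendered
        // value, capped at maxColLen. Widths are byte lengths, so multi-byte UTF-8
        // values occupy more width than their on-screen character count.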
  1889  func calcColLens(objList []map[string]interface{}, fields []string, maxColLen int) []int {
  1890  
  1891  	colLen := 0
  1892  	colVal := ""
  1893  	colLens := make([]int, len(fields))
  1894  	for _, obj := range objList {
  1895  		for i := 0; i < len(fields); i++ {
  1896  			colVal = getColValue(obj[fields[i]])
  1897  			colLen = len(colVal)
  1898  			if colLen > colLens[i] {
  1899  				colLens[i] = colLen
  1900  			}
  1901  		}
  1902  	}
  1903  	if maxColLen > 0 {
  1904  		for i := 0; i < len(fields); i++ {
  1905  			if colLens[i] > maxColLen {
  1906  				colLens[i] = maxColLen
  1907  			}
  1908  		}
  1909  	}
  1910  	return colLens
  1911  }
  1912  
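        // addTitles prepends a header row that maps every field name to itself, so the
        // rendered table starts with a title line.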
  1913  func addTitles(objList []map[string]interface{}, fields []string) []map[string]interface{} {
  1914  	titleMap := make(map[string]interface{}, len(fields))
  1915  	for i := 0; i < len(fields); i++ {
  1916  		titleMap[fields[i]] = fields[i]
  1917  	}
  1918  
  1919  	dst := append(objList, titleMap)
  1920  	copy(dst[1:], dst[:len(dst)-1])
  1921  	dst[0] = titleMap
  1922  	return dst
  1923  }
  1924  
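        // toTable renders the rows as a plain-text grid: one '|'-delimited line per row
        // (wrapping over-long cells onto continuation lines when showAll is set,
        // truncating them with "..." otherwise), with a separator line after each row.
        // When appendMode is true the leading separator is skipped so the output can be
        // appended to an existing table.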
  1925  func toTable(objList []map[string]interface{}, fields []string, colLens []int,
  1926  	showAll bool, appendMode bool) string {
  1927  	if fields == nil || objList == nil {
  1928  		return ""
  1929  	}
  1930  
  1931  	if colLens == nil {
  1932  		colLens = calcColLens(objList, fields, COL_MAX_LEN)
  1933  	}
  1934  
  1935  	output := &strings.Builder{}
  1936  	if !appendMode {
  1937  		sepLine(output, colLens)
  1938  	}
  1939  
  1940  	for _, obj := range objList {
  1941  		objMore := obj
  1942  		for objMore != nil {
  1943  			objMore = formateLine(output, objMore, fields, colLens, showAll)
  1944  		}
  1945  		sepLine(output, colLens)
  1946  	}
  1947  
  1948  	return output.String()
  1949  }
  1950  
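        // formateLine writes one table row. Cells longer than their column width are
        // either truncated with "..." or, when showAll is set, carried over into the
        // returned map so the caller can print a continuation line; nil means the row
        // is complete.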
  1951  func formateLine(output *strings.Builder, obj map[string]interface{}, fields []string, colLens []int,
  1952  	showAll bool) map[string]interface{} {
  1953  	hasMore := false
  1954  	objMore := make(map[string]interface{})
  1955  	colLen := 0
  1956  	colVal := ""
  1957  	for i := 0; i < len(fields); i++ {
  1958  		colVal = getColValue(obj[fields[i]])
  1959  
  1960  		colLen = len(colVal)
  1961  		if colLen <= colLens[i] {
  1962  			output.WriteString("|")
  1963  			output.WriteString(colVal)
  1964  			blanks(output, colLens[i]-colLen)
  1965  			if showAll {
  1966  				objMore[fields[i]] = ""
  1967  			}
  1968  		} else {
  1969  			output.WriteString("|")
  1970  			if showAll {
  1971  				output.WriteString(colVal[0:colLens[i]])
  1972  				objMore[fields[i]] = colVal[colLens[i]:]
  1973  				hasMore = true
  1974  			} else {
  1975  				output.WriteString(colVal[0:colLens[i]-3] + "...")
  1976  			}
  1977  		}
  1978  	}
  1979  	output.WriteString("|")
  1980  	output.WriteString(util.StringUtil.LineSeparator())
  1981  
  1982  	if hasMore {
  1983  		return objMore
  1984  	} else {
  1985  		return nil
  1986  	}
  1987  }
  1988  
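        // sepLine writes a separator row built entirely of '+' characters, one run per
        // column plus the border positions.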
  1989  func sepLine(output *strings.Builder, colLens []int) {
  1990  	output.WriteString("+")
  1991  	for _, colLen := range colLens {
  1992  		for i := 0; i < colLen; i++ {
  1993  			output.WriteString("+")
  1994  		}
  1995  		output.WriteString("+")
  1996  	}
  1997  	output.WriteString(util.StringUtil.LineSeparator())
  1998  }
  1999  
  2000  func blanks(output *strings.Builder, count int) {
  2001  	for count > 0 {
  2002  		output.WriteString(" ")
  2003  		count--
  2004  	}
  2005  }
  2006  
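        // getColValue renders a cell value with fmt.Sprint and strips tabs and line
        // breaks so a value cannot break the table layout.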
  2007  func getColValue(colObj interface{}) string {
  2008  	var colVal string
  2009  	if colObj == nil {
  2010  		colVal = ""
  2011  	} else {
  2012  		colVal = fmt.Sprint(colObj)
  2013  	}
  2014  
  2015  	colVal = strings.Replace(colVal, "\t", "", -1)
  2016  	colVal = strings.Replace(colVal, "\n", "", -1)
  2017  	colVal = strings.Replace(colVal, "\r", "", -1)
  2018  
  2019  	return colVal
  2020  }
  2021  
  2022  const (
  2023  	READ_MAX_SIZE = 100
  2024  )
  2025  
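        // statFlusher periodically renders the collected statistics through a
        // StatReader and appends them to a dated log file under StatDir.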
  2026  type statFlusher struct {
  2027  	sr         *StatReader
  2028  	logList    []string
  2029  	date       string
  2030  	logFile    *os.File
  2031  	flushFreq  int
  2032  	filePath   string
  2033  	filePrefix string
  2034  	buffer     *Dm_build_280
  2035  }
  2036  
  2037  func newStatFlusher() *statFlusher {
  2038  	sf := new(statFlusher)
  2039  	sf.sr = newStatReader()
  2040  	sf.logList = make([]string, 0, 32)
  2041  	sf.date = time.Now().Format("2006-01-02")
  2042  	sf.flushFreq = StatFlushFreq
  2043  	sf.filePath = StatDir
  2044  	sf.filePrefix = "dm_go_stat"
  2045  	sf.buffer = Dm_build_284()
  2046  	return sf
  2047  }
  2048  
  2049  func (sf *statFlusher) isConnStatEnabled() bool {
  2050  	return StatEnable
  2051  }
  2052  
  2053  func (sf *statFlusher) isSlowSqlStatEnabled() bool {
  2054  	return StatEnable
  2055  }
  2056  
  2057  func (sf *statFlusher) isHighFreqSqlStatEnabled() bool {
  2058  	return StatEnable
  2059  }
  2060  
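        // doRun is the flush loop: whenever there are connection stats it emits the
        // connection, high-frequency SQL and slow SQL sections in chunks of up to
        // READ_MAX_SIZE entries, then sleeps for StatFlushFreq seconds before the next
        // pass. It is expected to run on its own goroutine.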
  2061  func (sf *statFlusher) doRun() {
  2062  
  2063  	for {
  2064  		if len(goStat.connStatMap) > 0 {
  2065  			sf.logList = append(sf.logList, time.Now().String())
  2066  			if sf.isConnStatEnabled() {
  2067  				sf.logList = append(sf.logList, "#connection stat")
  2068  				hasMore := true
  2069  				for hasMore {
  2070  					hasMore, sf.logList = sf.sr.readConnStat(sf.logList, READ_MAX_SIZE)
  2071  					sf.writeAndFlush(sf.logList, 0, len(sf.logList))
  2072  					sf.logList = sf.logList[0:0]
  2073  				}
  2074  			}
  2075  			if sf.isHighFreqSqlStatEnabled() {
  2076  				sf.logList = append(sf.logList, "#top "+strconv.Itoa(StatHighFreqSqlCount)+" high freq sql stat")
  2077  				hasMore := true
  2078  				for hasMore {
  2079  					hasMore, sf.logList = sf.sr.readHighFreqSqlStat(sf.logList, READ_MAX_SIZE)
  2080  					sf.writeAndFlush(sf.logList, 0, len(sf.logList))
  2081  					sf.logList = sf.logList[0:0]
  2082  				}
  2083  			}
  2084  			if sf.isSlowSqlStatEnabled() {
  2085  				sf.logList = append(sf.logList, "#top "+strconv.Itoa(StatSlowSqlCount)+" slow sql stat")
  2086  				hasMore := true
  2087  				for hasMore {
  2088  					hasMore, sf.logList = sf.sr.readSlowSqlStat(sf.logList, READ_MAX_SIZE)
  2089  					sf.writeAndFlush(sf.logList, 0, len(sf.logList))
  2090  					sf.logList = sf.logList[0:0]
  2091  				}
  2092  			}
  2093  			sf.logList = append(sf.logList, util.StringUtil.LineSeparator())
  2094  			sf.logList = append(sf.logList, util.StringUtil.LineSeparator())
  2095  			sf.writeAndFlush(sf.logList, 0, len(sf.logList))
  2096  			sf.logList = sf.logList[0:0]
  2097  		}
        		// sleep on every pass (not only when connStatMap is non-empty) so an
        		// idle driver does not busy-spin this goroutine
  2098  		time.Sleep(time.Duration(StatFlushFreq) * time.Second)
  2099  	}
  2100  }
  2101  
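        // writeAndFlush appends l log lines starting at startOff to the internal
        // buffer, flushing to the log file whenever the buffer reaches FLUSH_SIZE and
        // once more at the end if anything is left.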
  2102  func (sf *statFlusher) writeAndFlush(logs []string, startOff int, l int) {
  2103  	var bytes []byte
  2104  	for i := startOff; i < startOff+l; i++ {
  2105  		bytes = []byte(logs[i] + util.StringUtil.LineSeparator())
  2106  
  2107  		sf.buffer.Dm_build_306(bytes, 0, len(bytes))
  2108  
  2109  		if sf.buffer.Dm_build_285() >= FLUSH_SIZE {
  2110  			sf.doFlush(sf.buffer)
  2111  		}
  2112  	}
  2113  
  2114  	if sf.buffer.Dm_build_285() > 0 {
  2115  		sf.doFlush(sf.buffer)
  2116  	}
  2117  }
  2118  
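        // doFlush rotates the log file when needed and writes the buffered bytes out.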
  2119  func (sf *statFlusher) doFlush(buffer *Dm_build_280) {
  2120  	if sf.needCreateNewFile() {
  2121  		sf.closeCurrentFile()
  2122  		sf.logFile = sf.createNewFile()
  2123  	}
  2124  	buffer.Dm_build_300(sf.logFile, buffer.Dm_build_285())
  2125  }
  2126  func (sf *statFlusher) closeCurrentFile() {
  2127  	if sf.logFile != nil {
  2128  		sf.logFile.Close()
  2129  		sf.logFile = nil
  2130  	}
  2131  }
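        // createNewFile opens a fresh stat log file named
        // <prefix>_<date>_<nanoseconds>.txt under StatDir, creating the directory if
        // missing; it returns nil when StatDir is empty or the file already exists.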
  2132  func (sf *statFlusher) createNewFile() *os.File {
  2133  	sf.date = time.Now().Format("2006-01-02")
  2134  	fileName := sf.filePrefix + "_" + sf.date + "_" + strconv.Itoa(time.Now().Nanosecond()) + ".txt"
  2135  	sf.filePath = StatDir
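        	// note: the file name is concatenated directly onto filePath, so StatDir is
        	// assumed to end with a path separator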
  2136  	if len(sf.filePath) > 0 {
  2137  		if _, err := os.Stat(sf.filePath); err != nil {
  2138  			os.MkdirAll(sf.filePath, 0755)
  2139  		}
  2140  		if _, err := os.Stat(sf.filePath + fileName); err != nil {
  2141  			logFile, err := os.Create(sf.filePath + fileName)
  2142  			if err != nil {
  2143  				panic(err)
  2144  			}
  2145  			return logFile
  2146  		}
  2147  	}
  2148  	return nil
  2149  }
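        // needCreateNewFile reports whether the log should rotate: no open file yet,
        // the date has rolled over, the file cannot be stat'ed, or it has grown past
        // MAX_FILE_SIZE.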
  2150  func (sf *statFlusher) needCreateNewFile() bool {
  2151  	now := time.Now().Format("2006-01-02")
        	if sf.logFile == nil || now != sf.date {
        		return true
        	}
  2152  	fileInfo, err := sf.logFile.Stat()
  2153  	return err != nil || fileInfo.Size() > int64(MAX_FILE_SIZE)
  2154  }