github.com/linapex/ethereum-dpos-chinese@v0.0.0-20190316121959-b78b3a4a1ece/metrics/influxdb/influxdb.go (about)

     1  
     2  //<developer>
     3  //    <name>linapex 曹一峰</name>
     4  //    <email>linapex@163.com</email>
     5  //    <wx>superexc</wx>
     6  //    <qqgroup>128148617</qqgroup>
     7  //    <url>https://jsq.ink</url>
     8  //    <role>pku engineer</role>
     9  //    <date>2019-03-16 12:09:42</date>
    10  //</624342649205166080>
    11  
    12  package influxdb
    13  
    14  import (
    15  	"fmt"
    16  	uurl "net/url"
    17  	"time"
    18  
    19  	"github.com/ethereum/go-ethereum/log"
    20  	"github.com/ethereum/go-ethereum/metrics"
    21  	"github.com/influxdata/influxdb/client"
    22  )
    23  
// reporter periodically posts the metrics from a registry to an InfluxDB
// instance, prefixing measurement names with namespace and attaching tags
// to every point.
type reporter struct {
	reg      metrics.Registry // registry whose metrics are published
	interval time.Duration    // delay between two consecutive posts

	url       uurl.URL // parsed InfluxDB endpoint
	database  string   // target InfluxDB database
	username  string
	password  string
	namespace string            // prefix prepended to every measurement name
	tags      map[string]string // tags attached to every point

	client *client.Client // recreated by makeClient when a ping fails

	// cache holds the previously reported value per counter name, so that
	// metrics.Counter instances are reported as deltas rather than totals.
	cache map[string]int64
}
    39  
// InfluxDB starts an InfluxDB reporter which will post the metrics from the
// given metrics.Registry at each d interval. It is equivalent to
// InfluxDBWithTags with a nil tag set.
func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
	InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
}
    44  
    45  //influxdb with tags启动一个influxdb报告程序,该报告程序将在每个d间隔使用指定的标记从给定的metrics.registry中发布
    46  func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
    47  	u, err := uurl.Parse(url)
    48  	if err != nil {
    49  		log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
    50  		return
    51  	}
    52  
    53  	rep := &reporter{
    54  		reg:       r,
    55  		interval:  d,
    56  		url:       *u,
    57  		database:  database,
    58  		username:  username,
    59  		password:  password,
    60  		namespace: namespace,
    61  		tags:      tags,
    62  		cache:     make(map[string]int64),
    63  	}
    64  	if err := rep.makeClient(); err != nil {
    65  		log.Warn("Unable to make InfluxDB client", "err", err)
    66  		return
    67  	}
    68  
    69  	rep.run()
    70  }
    71  
    72  func (r *reporter) makeClient() (err error) {
    73  	r.client, err = client.NewClient(client.Config{
    74  		URL:      r.url,
    75  		Username: r.username,
    76  		Password: r.password,
    77  	})
    78  
    79  	return
    80  }
    81  
    82  func (r *reporter) run() {
    83  	intervalTicker := time.Tick(r.interval)
    84  	pingTicker := time.Tick(time.Second * 5)
    85  
    86  	for {
    87  		select {
    88  		case <-intervalTicker:
    89  			if err := r.send(); err != nil {
    90  				log.Warn("Unable to send to InfluxDB", "err", err)
    91  			}
    92  		case <-pingTicker:
    93  			_, _, err := r.client.Ping()
    94  			if err != nil {
    95  				log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)
    96  
    97  				if err = r.makeClient(); err != nil {
    98  					log.Warn("Unable to make InfluxDB client", "err", err)
    99  				}
   100  			}
   101  		}
   102  	}
   103  }
   104  
// send snapshots every metric in the registry, converts each to one InfluxDB
// point, and writes the whole batch to the configured database in a single
// request. It returns the write error, if any.
func (r *reporter) send() error {
	var pts []client.Point

	r.reg.Each(func(name string, i interface{}) {
		// Each point gets its own timestamp, taken when its metric is visited.
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {
		case metrics.Counter:
			// Counters are reported as deltas: subtract the last reported
			// value (from r.cache) and remember the new total.
			v := metric.Count()
			l := r.cache[name]
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.count", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": v - l,
				},
				Time: now,
			})
			r.cache[name] = v
		case metrics.Gauge:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": ms.Value(),
				},
				Time: now,
			})
		case metrics.GaugeFloat64:
			// Same measurement suffix as Gauge; only the field value type
			// (float64 vs int64) differs.
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": ms.Value(),
				},
				Time: now,
			})
		case metrics.Histogram:
			ms := metric.Snapshot()
			// Percentiles are requested on a 0-1 scale for Histogram/Timer.
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				},
				Time: now,
			})
		case metrics.Meter:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.meter", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count": ms.Count(),
					"m1":    ms.Rate1(),
					"m5":    ms.Rate5(),
					"m15":   ms.Rate15(),
					"mean":  ms.RateMean(),
				},
				Time: now,
			})
		case metrics.Timer:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.timer", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
					"m1":       ms.Rate1(),
					"m5":       ms.Rate5(),
					"m15":      ms.Rate15(),
					"meanrate": ms.RateMean(),
				},
				Time: now,
			})
		case metrics.ResettingTimer:
			t := metric.Snapshot()

			// Only report when the window actually recorded values; an
			// empty window would make the val[...] indexing below panic.
			if len(t.Values()) > 0 {
				// Note the 0-100 percentile scale here, unlike
				// Histogram/Timer above.
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()
				pts = append(pts, client.Point{
					Measurement: fmt.Sprintf("%s%s.span", namespace, name),
					Tags:        r.tags,
					Fields: map[string]interface{}{
						"count": len(val),
						// NOTE(review): "max"/"min" take the last/first
						// elements of Values(), which assumes the slice is
						// sorted ascending — confirm against the
						// ResettingTimer snapshot implementation.
						"max":   val[len(val)-1],
						"mean":  t.Mean(),
						"min":   val[0],
						"p50":   ps[0],
						"p95":   ps[1],
						"p99":   ps[2],
					},
					Time: now,
				})
			}
		}
	})

	bps := client.BatchPoints{
		Points:   pts,
		Database: r.database,
	}

	_, err := r.client.Write(bps)
	return err
}
   239