github.com/codingfuture/orig-energi3@v0.8.4/metrics/influxdb/influxdb.go

// Copyright 2018 The Energi Core Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of the Energi Core library.
//
// The Energi Core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Energi Core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>.

package influxdb

import (
	"fmt"
	uurl "net/url"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/influxdata/influxdb/client"
)

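// reporter posts the contents of a metrics.Registry to an InfluxDB database,
// either periodically (run) or on demand (send).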
type reporter struct {
	reg      metrics.Registry
	interval time.Duration

	url       uurl.URL
	database  string
	username  string
	password  string
	namespace string
	tags      map[string]string

	client *client.Client

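	// cache remembers the last reported count of each counter so that only
	// the delta since the previous report is sent.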
	cache map[string]int64
}

// InfluxDB starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval.
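//
// A minimal usage sketch (the endpoint, database name, credentials and
// namespace below are placeholders, not values required by this package);
// InfluxDB blocks, so callers typically start it in its own goroutine:
//
//	go influxdb.InfluxDB(metrics.DefaultRegistry, 10*time.Second,
//		"http://localhost:8086", "metrics", "user", "pass", "geth.")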
func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
	InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
}

// InfluxDBWithTags starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval with the specified tags.
func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
	u, err := uurl.Parse(url)
	if err != nil {
		log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
		return
	}

	rep := &reporter{
		reg:       r,
		interval:  d,
		url:       *u,
		database:  database,
		username:  username,
		password:  password,
		namespace: namespace,
		tags:      tags,
		cache:     make(map[string]int64),
	}
	if err := rep.makeClient(); err != nil {
		log.Warn("Unable to make InfluxDB client", "err", err)
		return
	}

	rep.run()
}

// InfluxDBWithTagsOnce runs an InfluxDB reporter once, posting the given metrics.Registry with the specified tags.
func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
	u, err := uurl.Parse(url)
	if err != nil {
		return fmt.Errorf("Unable to parse InfluxDB. url: %s, err: %v", url, err)
	}

	rep := &reporter{
		reg:       r,
		url:       *u,
		database:  database,
		username:  username,
		password:  password,
		namespace: namespace,
		tags:      tags,
		cache:     make(map[string]int64),
	}
	if err := rep.makeClient(); err != nil {
		return fmt.Errorf("Unable to make InfluxDB client. err: %v", err)
	}

	if err := rep.send(); err != nil {
		return fmt.Errorf("Unable to send to InfluxDB. err: %v", err)
	}

	return nil
}

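// makeClient (re)creates the InfluxDB client from the reporter's stored URL
// and credentials.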
func (r *reporter) makeClient() (err error) {
	r.client, err = client.NewClient(client.Config{
		URL:      r.url,
		Username: r.username,
		Password: r.password,
	})

	return
}

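// run reports the registry at every interval and pings InfluxDB every five
// seconds, recreating the client whenever a ping fails.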
func (r *reporter) run() {
	intervalTicker := time.Tick(r.interval)
	pingTicker := time.Tick(time.Second * 5)

	for {
		select {
		case <-intervalTicker:
			if err := r.send(); err != nil {
				log.Warn("Unable to send to InfluxDB", "err", err)
			}
		case <-pingTicker:
			_, _, err := r.client.Ping()
			if err != nil {
				log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)

				if err = r.makeClient(); err != nil {
					log.Warn("Unable to make InfluxDB client", "err", err)
				}
			}
		}
	}
}

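// send converts every metric in the registry into InfluxDB points and writes
// them to the configured database as a single batch.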
func (r *reporter) send() error {
	var pts []client.Point

	r.reg.Each(func(name string, i interface{}) {
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {
		case metrics.Counter:
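			// Counters are cumulative; report only the delta since the last
			// send and remember the new total.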
			v := metric.Count()
			l := r.cache[name]
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.count", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": v - l,
				},
				Time: now,
			})
			r.cache[name] = v
		case metrics.Gauge:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": ms.Value(),
				},
				Time: now,
			})
		case metrics.GaugeFloat64:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": ms.Value(),
				},
				Time: now,
			})
		case metrics.Histogram:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				},
				Time: now,
			})
		case metrics.Meter:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.meter", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count": ms.Count(),
					"m1":    ms.Rate1(),
					"m5":    ms.Rate5(),
					"m15":   ms.Rate15(),
					"mean":  ms.RateMean(),
				},
				Time: now,
			})
		case metrics.Timer:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.timer", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
					"m1":       ms.Rate1(),
					"m5":       ms.Rate5(),
					"m15":      ms.Rate15(),
					"meanrate": ms.RateMean(),
				},
				Time: now,
			})
		case metrics.ResettingTimer:
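			// Resetting timers only produce a point when at least one value
			// was recorded since the last snapshot.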
			t := metric.Snapshot()

			if len(t.Values()) > 0 {
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()
				pts = append(pts, client.Point{
					Measurement: fmt.Sprintf("%s%s.span", namespace, name),
					Tags:        r.tags,
					Fields: map[string]interface{}{
						"count": len(val),
						"max":   val[len(val)-1],
						"mean":  t.Mean(),
						"min":   val[0],
						"p50":   ps[0],
						"p95":   ps[1],
						"p99":   ps[2],
					},
					Time: now,
				})
			}
		}
	})

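	// Write all collected points in one batch to the configured database.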
	bps := client.BatchPoints{
		Points:   pts,
		Database: r.database,
	}

	_, err := r.client.Write(bps)
	return err
}