github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/metrics/influxdb/influxdb.go (about)

     1  //  Copyright 2018 The go-ethereum Authors
     2  //  Copyright 2019 The go-aigar Authors
     3  //  This file is part of the go-aigar library.
     4  //
     5  //  The go-aigar library is free software: you can redistribute it and/or modify
     6  //  it under the terms of the GNU Lesser General Public License as published by
     7  //  the Free Software Foundation, either version 3 of the License, or
     8  //  (at your option) any later version.
     9  //
    10  //  The go-aigar library is distributed in the hope that it will be useful,
    11  //  but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  //  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  //  GNU Lesser General Public License for more details.
    14  //
    15  //  You should have received a copy of the GNU Lesser General Public License
    16  //  along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package influxdb
    19  
    20  import (
    21  	"fmt"
    22  	uurl "net/url"
    23  	"time"
    24  
    25  	"github.com/AigarNetwork/aigar/log"
    26  	"github.com/AigarNetwork/aigar/metrics"
    27  	"github.com/influxdata/influxdb/client"
    28  )
    29  
// reporter wires a metrics.Registry to an InfluxDB instance, periodically
// (or once) flushing the registry contents as InfluxDB points.
type reporter struct {
	reg      metrics.Registry // source of the metrics to report
	interval time.Duration    // how often run() posts the registry

	url       uurl.URL // parsed InfluxDB endpoint
	database  string   // target InfluxDB database
	username  string   // credentials for the InfluxDB server
	password  string
	namespace string            // prefix prepended to every measurement name
	tags      map[string]string // static tags attached to every point

	client *client.Client // live InfluxDB client; recreated when pings fail

	// cache remembers the last value sent per counter so that only the
	// delta since the previous report is posted.
	cache map[string]int64
}
    45  
// InfluxDB starts an InfluxDB reporter which will post the metrics from the
// given metrics.Registry at each d interval.
func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
	InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
}
    50  
    51  // InfluxDBWithTags starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval with the specified tags
    52  func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
    53  	u, err := uurl.Parse(url)
    54  	if err != nil {
    55  		log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
    56  		return
    57  	}
    58  
    59  	rep := &reporter{
    60  		reg:       r,
    61  		interval:  d,
    62  		url:       *u,
    63  		database:  database,
    64  		username:  username,
    65  		password:  password,
    66  		namespace: namespace,
    67  		tags:      tags,
    68  		cache:     make(map[string]int64),
    69  	}
    70  	if err := rep.makeClient(); err != nil {
    71  		log.Warn("Unable to make InfluxDB client", "err", err)
    72  		return
    73  	}
    74  
    75  	rep.run()
    76  }
    77  
    78  // InfluxDBWithTagsOnce runs once an InfluxDB reporter and post the given metrics.Registry with the specified tags
    79  func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
    80  	u, err := uurl.Parse(url)
    81  	if err != nil {
    82  		return fmt.Errorf("Unable to parse InfluxDB. url: %s, err: %v", url, err)
    83  	}
    84  
    85  	rep := &reporter{
    86  		reg:       r,
    87  		url:       *u,
    88  		database:  database,
    89  		username:  username,
    90  		password:  password,
    91  		namespace: namespace,
    92  		tags:      tags,
    93  		cache:     make(map[string]int64),
    94  	}
    95  	if err := rep.makeClient(); err != nil {
    96  		return fmt.Errorf("Unable to make InfluxDB client. err: %v", err)
    97  	}
    98  
    99  	if err := rep.send(); err != nil {
   100  		return fmt.Errorf("Unable to send to InfluxDB. err: %v", err)
   101  	}
   102  
   103  	return nil
   104  }
   105  
   106  func (r *reporter) makeClient() (err error) {
   107  	r.client, err = client.NewClient(client.Config{
   108  		URL:      r.url,
   109  		Username: r.username,
   110  		Password: r.password,
   111  		Timeout:  10 * time.Second,
   112  	})
   113  
   114  	return
   115  }
   116  
   117  func (r *reporter) run() {
   118  	intervalTicker := time.Tick(r.interval)
   119  	pingTicker := time.Tick(time.Second * 5)
   120  
   121  	for {
   122  		select {
   123  		case <-intervalTicker:
   124  			if err := r.send(); err != nil {
   125  				log.Warn("Unable to send to InfluxDB", "err", err)
   126  			}
   127  		case <-pingTicker:
   128  			_, _, err := r.client.Ping()
   129  			if err != nil {
   130  				log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)
   131  
   132  				if err = r.makeClient(); err != nil {
   133  					log.Warn("Unable to make InfluxDB client", "err", err)
   134  				}
   135  			}
   136  		}
   137  	}
   138  }
   139  
// send flushes a snapshot of every metric in the registry to InfluxDB as a
// single batch write. Counters are reported as deltas against the previously
// sent value (tracked in r.cache); all other metric types are reported from
// point-in-time snapshots. Returns the error from the batch write, if any.
func (r *reporter) send() error {
	var pts []client.Point

	r.reg.Each(func(name string, i interface{}) {
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {
		case metrics.Counter:
			// Report only the delta since the last send, then remember the
			// current absolute value for the next round.
			v := metric.Count()
			l := r.cache[name]
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.count", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": v - l,
				},
				Time: now,
			})
			r.cache[name] = v
		case metrics.Gauge:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": ms.Value(),
				},
				Time: now,
			})
		case metrics.GaugeFloat64:
			// Same measurement suffix as the integer gauge above; the two
			// are distinguished only by the field's value type.
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": ms.Value(),
				},
				Time: now,
			})
		case metrics.Histogram:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				},
				Time: now,
			})
		case metrics.Meter:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.meter", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count": ms.Count(),
					"m1":    ms.Rate1(),
					"m5":    ms.Rate5(),
					"m15":   ms.Rate15(),
					"mean":  ms.RateMean(),
				},
				Time: now,
			})
		case metrics.Timer:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.timer", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
					"m1":       ms.Rate1(),
					"m5":       ms.Rate5(),
					"m15":      ms.Rate15(),
					"meanrate": ms.RateMean(),
				},
				Time: now,
			})
		case metrics.ResettingTimer:
			t := metric.Snapshot()

			// Only report if the timer saw any events since the last reset.
			if len(t.Values()) > 0 {
				// NOTE: percentiles are given on the 0-100 scale here, unlike
				// the 0-1 fractions used for Histogram/Timer above.
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()
				pts = append(pts, client.Point{
					Measurement: fmt.Sprintf("%s%s.span", namespace, name),
					Tags:        r.tags,
					Fields: map[string]interface{}{
						"count": len(val),
						// NOTE(review): min/max index into Values() assuming it is
						// sorted — this appears to rely on Percentiles() sorting the
						// snapshot's values in place; confirm against the metrics
						// package before reordering these calls.
						"max":   val[len(val)-1],
						"mean":  t.Mean(),
						"min":   val[0],
						"p50":   ps[0],
						"p95":   ps[1],
						"p99":   ps[2],
					},
					Time: now,
				})
			}
		}
	})

	// Write all collected points in one batch against the target database.
	bps := client.BatchPoints{
		Points:   pts,
		Database: r.database,
	}

	_, err := r.client.Write(bps)
	return err
}