github.com/authcall/reference-optimistic-geth@v0.0.0-20220816224302-06313bfeb8d2/metrics/influxdb/influxdbv2.go (about)

     1  //
     2  // The go-ethereum library is distributed in the hope that it will be useful,
     3  // but WITHOUT ANY WARRANTY; without even the implied warranty of
     4  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
     5  // GNU Lesser General Public License for more details.
     6  //
     7  // You should have received a copy of the GNU Lesser General Public License
     8  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
     9  package influxdb
    10  
    11  import (
    12  	"context"
    13  	"fmt"
    14  	"time"
    15  
    16  	"github.com/ethereum/go-ethereum/log"
    17  	"github.com/ethereum/go-ethereum/metrics"
    18  	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
    19  	"github.com/influxdata/influxdb-client-go/v2/api"
    20  )
    21  
// v2Reporter periodically posts the contents of a metrics.Registry to an
// InfluxDB v2 server using the client's asynchronous write API.
type v2Reporter struct {
	reg      metrics.Registry // registry whose metrics are reported
	interval time.Duration    // how often a full report is sent

	endpoint     string            // InfluxDB server URL
	token        string            // API authentication token
	bucket       string            // destination bucket
	organization string            // InfluxDB organization
	namespace    string            // prefix prepended to every measurement name
	tags         map[string]string // static tags attached to every written point

	client influxdb2.Client
	write  api.WriteAPI // asynchronous, batching write client

	// cache holds the last reported value of each counter, keyed by metric
	// name, so that only the delta since the previous report is written.
	cache map[string]int64
}
    38  
    39  // InfluxDBWithTags starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval with the specified tags
    40  func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, token string, bucket string, organization string, namespace string, tags map[string]string) {
    41  	rep := &v2Reporter{
    42  		reg:          r,
    43  		interval:     d,
    44  		endpoint:     endpoint,
    45  		token:        token,
    46  		bucket:       bucket,
    47  		organization: organization,
    48  		namespace:    namespace,
    49  		tags:         tags,
    50  		cache:        make(map[string]int64),
    51  	}
    52  
    53  	rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
    54  	defer rep.client.Close()
    55  
    56  	// async write client
    57  	rep.write = rep.client.WriteAPI(rep.organization, rep.bucket)
    58  	errorsCh := rep.write.Errors()
    59  
    60  	// have to handle write errors in a separate goroutine like this b/c the channel is unbuffered and will block writes if not read
    61  	go func() {
    62  		for err := range errorsCh {
    63  			log.Warn("write error", "err", err.Error())
    64  		}
    65  	}()
    66  	rep.run()
    67  }
    68  
    69  func (r *v2Reporter) run() {
    70  	intervalTicker := time.NewTicker(r.interval)
    71  	pingTicker := time.NewTicker(time.Second * 5)
    72  
    73  	for {
    74  		select {
    75  		case <-intervalTicker.C:
    76  			r.send()
    77  		case <-pingTicker.C:
    78  			_, err := r.client.Health(context.Background())
    79  			if err != nil {
    80  				log.Warn("Got error from influxdb client health check", "err", err.Error())
    81  			}
    82  		}
    83  	}
    84  }
    85  
// send writes one snapshot of every metric in the registry to InfluxDB.
// Measurement names are formed as "<namespace><metric name>.<type suffix>"
// (e.g. ".count", ".gauge", ".timer"), each point carries the reporter's
// static tags, and all buffered points are flushed at the end. Unknown
// metric types are silently skipped.
func (r *v2Reporter) send() {
	r.reg.Each(func(name string, i interface{}) {
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {
		case metrics.Counter:
			// Counters are reported as deltas: subtract the previously
			// reported value (cached per metric name) from the current count.
			v := metric.Count()
			l := r.cache[name]

			measurement := fmt.Sprintf("%s%s.count", namespace, name)
			fields := map[string]interface{}{
				"value": v - l,
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

			// Remember the absolute count for the next delta computation.
			r.cache[name] = v

		case metrics.Gauge:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.GaugeFloat64:
			// Same ".gauge" suffix as the integer gauge; only the value type
			// differs.
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Histogram:
			ms := metric.Snapshot()

			// Skip empty histograms: percentiles are meaningless without
			// samples, and this avoids writing all-zero points.
			if ms.Count() > 0 {
				ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
				measurement := fmt.Sprintf("%s%s.histogram", namespace, name)
				fields := map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}

		case metrics.Meter:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.meter", namespace, name)
			fields := map[string]interface{}{
				"count": ms.Count(),
				"m1":    ms.Rate1(),
				"m5":    ms.Rate5(),
				"m15":   ms.Rate15(),
				"mean":  ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Timer:
			// Timers combine histogram-style distribution fields with
			// meter-style rate fields in a single point.
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})

			measurement := fmt.Sprintf("%s%s.timer", namespace, name)
			fields := map[string]interface{}{
				"count":    ms.Count(),
				"max":      ms.Max(),
				"mean":     ms.Mean(),
				"min":      ms.Min(),
				"stddev":   ms.StdDev(),
				"variance": ms.Variance(),
				"p50":      ps[0],
				"p75":      ps[1],
				"p95":      ps[2],
				"p99":      ps[3],
				"p999":     ps[4],
				"p9999":    ps[5],
				"m1":       ms.Rate1(),
				"m5":       ms.Rate5(),
				"m15":      ms.Rate15(),
				"meanrate": ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.ResettingTimer:
			t := metric.Snapshot()

			// Only report if the window recorded any values.
			if len(t.Values()) > 0 {
				// NOTE(review): percentile arguments here are 50/95/99, not
				// 0.5/0.95/0.99 as in the Histogram/Timer cases — presumably
				// ResettingTimer.Percentiles uses a 0-100 scale; confirm
				// against the metrics package.
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()

				measurement := fmt.Sprintf("%s%s.span", namespace, name)
				fields := map[string]interface{}{
					"count": len(val),
					// min/max are taken from the slice ends — assumes val is
					// sorted at this point (presumably Percentiles sorts the
					// snapshot's values in place); TODO confirm.
					"max":   val[len(val)-1],
					"mean":  t.Mean(),
					"min":   val[0],
					"p50":   ps[0],
					"p95":   ps[1],
					"p99":   ps[2],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}
		}
	})

	// Force all unwritten data to be sent
	r.write.Flush()
}