github.com/MikyChow/arbitrum-go-ethereum@v0.0.0-20230306102812-078da49636de/metrics/influxdb/influxdbv2.go

// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package influxdb

import (
	"context"
	"fmt"
	"time"

	"github.com/MikyChow/arbitrum-go-ethereum/log"
	"github.com/MikyChow/arbitrum-go-ethereum/metrics"
	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	"github.com/influxdata/influxdb-client-go/v2/api"
)

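// v2Reporter reports the metrics in a registry to an InfluxDB v2 instance
// on a fixed interval, using the client's asynchronous write API.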
type v2Reporter struct {
	reg      metrics.Registry
	interval time.Duration

	endpoint     string
	token        string
	bucket       string
	organization string
	namespace    string
	tags         map[string]string

	client influxdb2.Client
	write  api.WriteAPI

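	// cache holds the last reported value of every counter so that only
	// the delta since the previous report is written.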
	cache map[string]int64
}

// InfluxDBV2WithTags starts an InfluxDB v2 reporter that posts the metrics
// from the given metrics.Registry at each d interval with the specified tags.
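//
// The call blocks while the reporter runs, so it is normally started in its
// own goroutine. An illustrative invocation (the interval and all connection
// values below are placeholders, not defaults defined by this package):
//
//	go InfluxDBV2WithTags(metrics.DefaultRegistry, 10*time.Second,
//		"http://localhost:8086", "my-token", "metrics", "my-org", "geth.", nil)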
func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, token string, bucket string, organization string, namespace string, tags map[string]string) {
	rep := &v2Reporter{
		reg:          r,
		interval:     d,
		endpoint:     endpoint,
		token:        token,
		bucket:       bucket,
		organization: organization,
		namespace:    namespace,
		tags:         tags,
		cache:        make(map[string]int64),
	}

	rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
	defer rep.client.Close()

	// async write client
	rep.write = rep.client.WriteAPI(rep.organization, rep.bucket)
	errorsCh := rep.write.Errors()

	// Write errors have to be drained in a separate goroutine: the errors
	// channel is unbuffered, so writes would block if it were never read.
	go func() {
		for err := range errorsCh {
			log.Warn("write error", "err", err.Error())
		}
	}()
	rep.run()
}

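// run reports the registry every interval and checks the health of the
// InfluxDB endpoint every five seconds; it loops until the process exits.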
func (r *v2Reporter) run() {
	intervalTicker := time.NewTicker(r.interval)
	pingTicker := time.NewTicker(time.Second * 5)

	for {
		select {
		case <-intervalTicker.C:
			r.send()
		case <-pingTicker.C:
			_, err := r.client.Health(context.Background())
			if err != nil {
				log.Warn("Got error from influxdb client health check", "err", err.Error())
			}
		}
	}
}

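// send walks every metric in the registry, converts it into an InfluxDB
// point named "<namespace><metric name>.<type>", enqueues it on the
// asynchronous write API and flushes once the walk is complete.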
func (r *v2Reporter) send() {
	r.reg.Each(func(name string, i interface{}) {
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {
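		// Each metric type is mapped to its own measurement suffix and set
		// of fields; counters are reported as deltas via the cache above.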
		case metrics.Counter:
			v := metric.Count()
			l := r.cache[name]

			measurement := fmt.Sprintf("%s%s.count", namespace, name)
			fields := map[string]interface{}{
				"value": v - l,
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

			r.cache[name] = v

		case metrics.Gauge:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.GaugeFloat64:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Histogram:
			ms := metric.Snapshot()

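			// Histograms that have not received any samples are skipped.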
			if ms.Count() > 0 {
				ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
				measurement := fmt.Sprintf("%s%s.histogram", namespace, name)
				fields := map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}

		case metrics.Meter:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.meter", namespace, name)
			fields := map[string]interface{}{
				"count": ms.Count(),
				"m1":    ms.Rate1(),
				"m5":    ms.Rate5(),
				"m15":   ms.Rate15(),
				"mean":  ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Timer:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})

			measurement := fmt.Sprintf("%s%s.timer", namespace, name)
			fields := map[string]interface{}{
				"count":    ms.Count(),
				"max":      ms.Max(),
				"mean":     ms.Mean(),
				"min":      ms.Min(),
				"stddev":   ms.StdDev(),
				"variance": ms.Variance(),
				"p50":      ps[0],
				"p75":      ps[1],
				"p95":      ps[2],
				"p99":      ps[3],
				"p999":     ps[4],
				"p9999":    ps[5],
				"m1":       ms.Rate1(),
				"m5":       ms.Rate5(),
				"m15":      ms.Rate15(),
				"meanrate": ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.ResettingTimer:
			t := metric.Snapshot()

			if len(t.Values()) > 0 {
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()
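				// The min and max fields below rely on the Percentiles call
				// having sorted the snapshot's values, so the first and last
				// entries hold the extremes.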

				measurement := fmt.Sprintf("%s%s.span", namespace, name)
				fields := map[string]interface{}{
					"count": len(val),
					"max":   val[len(val)-1],
					"mean":  t.Mean(),
					"min":   val[0],
					"p50":   ps[0],
					"p95":   ps[1],
					"p99":   ps[2],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}
		}
	})

	// Force all unwritten data to be sent
	r.write.Flush()
}