github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/metrics/influxdb/influxdbv2.go

//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package influxdb

import (
	"context"
	"fmt"
	"time"

	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	"github.com/influxdata/influxdb-client-go/v2/api"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/metrics"
)

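// v2Reporter posts metrics from a metrics.Registry to an InfluxDB v2
// instance at a fixed interval. The cache field keeps the previously
// reported counter values so counters can be written as deltas.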
type v2Reporter struct {
	reg      metrics.Registry
	interval time.Duration

	endpoint     string
	token        string
	bucket       string
	organization string
	namespace    string
	tags         map[string]string

	client influxdb2.Client
	write  api.WriteAPI

	cache map[string]int64
}

// InfluxDBV2WithTags starts an InfluxDB v2 reporter which posts metrics from the given metrics.Registry at each d interval with the specified tags.
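//
// A minimal usage sketch (the endpoint, token, bucket, organization and tag
// values below are placeholders, assuming a reachable InfluxDB v2 server):
//
//	go InfluxDBV2WithTags(metrics.DefaultRegistry, 10*time.Second,
//		"http://localhost:8086", "my-token", "geth", "my-org", "geth.",
//		map[string]string{"host": "node-1"})
//
// The call does not return, so it is typically started in its own goroutine.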
func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, token string, bucket string, organization string, namespace string, tags map[string]string) {
	rep := &v2Reporter{
		reg:          r,
		interval:     d,
		endpoint:     endpoint,
		token:        token,
		bucket:       bucket,
		organization: organization,
		namespace:    namespace,
		tags:         tags,
		cache:        make(map[string]int64),
	}

	rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
	defer rep.client.Close()

	// async write client
	rep.write = rep.client.WriteAPI(rep.organization, rep.bucket)
	errorsCh := rep.write.Errors()

	// Handle write errors in a separate goroutine: the errors channel is unbuffered and would block writes if it were never read.
	go func() {
		for err := range errorsCh {
			log.Warn("write error", "err", err.Error())
		}
	}()
	rep.run()
}

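// run loops forever, posting the registry's metrics every interval and
// checking the health of the InfluxDB server every five seconds.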
func (r *v2Reporter) run() {
	intervalTicker := time.Tick(r.interval)
	pingTicker := time.Tick(time.Second * 5)

	for {
		select {
		case <-intervalTicker:
			r.send()
		case <-pingTicker:
			_, err := r.client.Health(context.Background())
			if err != nil {
				log.Warn("Got error from influxdb client health check", "err", err.Error())
			}
		}
	}
}

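// send writes one point per metric in the registry and then flushes the
// batched writes to InfluxDB.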
func (r *v2Reporter) send() {
	r.reg.Each(func(name string, i interface{}) {
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {

		case metrics.Counter:
			// Counters are reported as the delta since the previous report,
			// using the value cached from the last run.
			v := metric.Count()
			l := r.cache[name]

			measurement := fmt.Sprintf("%s%s.count", namespace, name)
			fields := map[string]interface{}{
				"value": v - l,
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

			r.cache[name] = v

		case metrics.Gauge:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.GaugeFloat64:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Histogram:
			ms := metric.Snapshot()

			if ms.Count() > 0 {
				ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
				measurement := fmt.Sprintf("%s%s.histogram", namespace, name)
				fields := map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}

		case metrics.Meter:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.meter", namespace, name)
			fields := map[string]interface{}{
				"count": ms.Count(),
				"m1":    ms.Rate1(),
				"m5":    ms.Rate5(),
				"m15":   ms.Rate15(),
				"mean":  ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Timer:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})

			measurement := fmt.Sprintf("%s%s.timer", namespace, name)
			fields := map[string]interface{}{
				"count":    ms.Count(),
				"max":      ms.Max(),
				"mean":     ms.Mean(),
				"min":      ms.Min(),
				"stddev":   ms.StdDev(),
				"variance": ms.Variance(),
				"p50":      ps[0],
				"p75":      ps[1],
				"p95":      ps[2],
				"p99":      ps[3],
				"p999":     ps[4],
				"p9999":    ps[5],
				"m1":       ms.Rate1(),
				"m5":       ms.Rate5(),
				"m15":      ms.Rate15(),
				"meanrate": ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.ResettingTimer:
			t := metric.Snapshot()

			if len(t.Values()) > 0 {
				// Percentiles sorts the snapshot's values, so the first and
				// last entries can be reported as min and max below.
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()

				measurement := fmt.Sprintf("%s%s.span", namespace, name)
				fields := map[string]interface{}{
					"count": len(val),
					"max":   val[len(val)-1],
					"mean":  t.Mean(),
					"min":   val[0],
					"p50":   ps[0],
					"p95":   ps[1],
					"p99":   ps[2],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}
		}
	})

	// Force all unwritten data to be sent
	r.write.Flush()
}