github.com/unicornultrafoundation/go-u2u@v1.0.0-rc1.0.20240205080301-e74a83d3fadc/metrics/influxdb/influxdbv2.go

// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package influxdb

import (
	"context"
	"fmt"
	"time"

	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	"github.com/influxdata/influxdb-client-go/v2/api"
	"github.com/unicornultrafoundation/go-u2u/log"
	"github.com/unicornultrafoundation/go-u2u/metrics"
)

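// v2Reporter periodically pushes the metrics of a registry to an InfluxDB v2
// instance. The cache map remembers the last reported value of every counter
// so that only the delta since the previous report is written.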
type v2Reporter struct {
	reg      metrics.Registry
	interval time.Duration

	endpoint     string
	token        string
	bucket       string
	organization string
	namespace    string
	tags         map[string]string

	client influxdb2.Client
	write  api.WriteAPI

	cache map[string]int64
}

// InfluxDBV2WithTags starts an InfluxDB v2 reporter which will post metrics from the given metrics.Registry at each d interval with the specified tags.
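//
// The call blocks while the reporter is running, so it is usually launched in
// its own goroutine, e.g. with some registry r and placeholder connection
// values:
//
//	go influxdb.InfluxDBV2WithTags(r, 10*time.Second,
//		"http://localhost:8086", "my-token", "my-bucket", "my-org", "u2u.", nil)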
func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, token string, bucket string, organization string, namespace string, tags map[string]string) {
	rep := &v2Reporter{
		reg:          r,
		interval:     d,
		endpoint:     endpoint,
		token:        token,
		bucket:       bucket,
		organization: organization,
		namespace:    namespace,
		tags:         tags,
		cache:        make(map[string]int64),
	}

	rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
	defer rep.client.Close()

	// async write client
	rep.write = rep.client.WriteAPI(rep.organization, rep.bucket)
	errorsCh := rep.write.Errors()

	// Write errors have to be drained in a separate goroutine: the errors
	// channel is unbuffered and would block writes if it were never read.
	go func() {
		for err := range errorsCh {
			log.Warn("write error", "err", err.Error())
		}
	}()
	rep.run()
}

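// run loops forever, sending a snapshot of the registry every interval and
// checking the health of the InfluxDB endpoint every five seconds.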
func (r *v2Reporter) run() {
	intervalTicker := time.Tick(r.interval)
	pingTicker := time.Tick(time.Second * 5)

	for {
		select {
		case <-intervalTicker:
			r.send()
		case <-pingTicker:
			_, err := r.client.Health(context.Background())
			if err != nil {
				log.Warn("Got error from influxdb client health check", "err", err.Error())
			}
		}
	}
}

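// send converts every metric in the registry into an InfluxDB point named
// "<namespace><metric name>.<type>", queues it on the asynchronous write API
// and flushes the queued points at the end of the pass.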
func (r *v2Reporter) send() {
	r.reg.Each(func(name string, i interface{}) {
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {

		case metrics.Counter:
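			// counters are reported as deltas: the last reported total is kept
			// in the cache and subtracted from the current count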
			v := metric.Count()
			l := r.cache[name]

			measurement := fmt.Sprintf("%s%s.count", namespace, name)
			fields := map[string]interface{}{
				"value": v - l,
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

			r.cache[name] = v

		case metrics.Gauge:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.GaugeFloat64:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Histogram:
			ms := metric.Snapshot()

			if ms.Count() > 0 {
				ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
				measurement := fmt.Sprintf("%s%s.histogram", namespace, name)
				fields := map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}

		case metrics.Meter:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.meter", namespace, name)
			fields := map[string]interface{}{
				"count": ms.Count(),
				"m1":    ms.Rate1(),
				"m5":    ms.Rate5(),
				"m15":   ms.Rate15(),
				"mean":  ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Timer:
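			// timers combine histogram statistics (min/max/mean/percentiles)
			// with meter-style rates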
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})

			measurement := fmt.Sprintf("%s%s.timer", namespace, name)
			fields := map[string]interface{}{
				"count":    ms.Count(),
				"max":      ms.Max(),
				"mean":     ms.Mean(),
				"min":      ms.Min(),
				"stddev":   ms.StdDev(),
				"variance": ms.Variance(),
				"p50":      ps[0],
				"p75":      ps[1],
				"p95":      ps[2],
				"p99":      ps[3],
				"p999":     ps[4],
				"p9999":    ps[5],
				"m1":       ms.Rate1(),
				"m5":       ms.Rate5(),
				"m15":      ms.Rate15(),
				"meanrate": ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.ResettingTimer:
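			// resetting timers are reported as spans, and only when samples
			// were recorded; Values() is read after Percentiles here, which
			// leaves it sorted, so its first and last entries serve as min
			// and max below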
			t := metric.Snapshot()

			if len(t.Values()) > 0 {
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()

				measurement := fmt.Sprintf("%s%s.span", namespace, name)
				fields := map[string]interface{}{
					"count": len(val),
					"max":   val[len(val)-1],
					"mean":  t.Mean(),
					"min":   val[0],
					"p50":   ps[0],
					"p95":   ps[1],
					"p99":   ps[2],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}
		}
	})

	// Force all unwritten data to be sent
	r.write.Flush()
}