github.com/DxChainNetwork/dxc@v0.8.1-0.20220824085222-1162e304b6e7/metrics/influxdb/influxdbv2.go

//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package influxdb

import (
	"context"
	"fmt"
	"time"

	"github.com/DxChainNetwork/dxc/log"
	"github.com/DxChainNetwork/dxc/metrics"
	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	"github.com/influxdata/influxdb-client-go/v2/api"
)

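// v2Reporter periodically posts metrics from a registry to an InfluxDB v2
// instance through the client's asynchronous write API. The cache field keeps
// the last reported value of every counter so that only deltas are written.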
type v2Reporter struct {
	reg      metrics.Registry
	interval time.Duration

	endpoint     string
	token        string
	bucket       string
	organization string
	namespace    string
	tags         map[string]string

	client influxdb2.Client
	write  api.WriteAPI

	cache map[string]int64
}

// InfluxDBV2WithTags starts an InfluxDB v2 reporter which posts metrics from the given metrics.Registry at each d interval with the specified tags.
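//
// The call blocks in the reporting loop, so it is normally started in its own
// goroutine. A minimal usage sketch follows; the endpoint, token, bucket,
// organization, namespace and tag values are placeholders:
//
//	go influxdb.InfluxDBV2WithTags(metrics.DefaultRegistry, 10*time.Second,
//		"http://localhost:8086", "my-token", "metrics", "my-org", "node.",
//		map[string]string{"host": "node-1"})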
func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, token string, bucket string, organization string, namespace string, tags map[string]string) {
	rep := &v2Reporter{
		reg:          r,
		interval:     d,
		endpoint:     endpoint,
		token:        token,
		bucket:       bucket,
		organization: organization,
		namespace:    namespace,
		tags:         tags,
		cache:        make(map[string]int64),
	}

	rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
	defer rep.client.Close()

	// async write client
	rep.write = rep.client.WriteAPI(rep.organization, rep.bucket)
	errorsCh := rep.write.Errors()
	// Write errors must be handled in a separate goroutine: the errors channel is
	// unbuffered and would block writes if it were left unread.
	go func() {
		for err := range errorsCh {
			log.Warn("write error", "err", err.Error())
		}
	}()
	rep.run()
}

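// run reports the registry on every interval tick and, every five seconds,
// pings the InfluxDB health endpoint, logging a warning when the check fails.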
func (r *v2Reporter) run() {
	intervalTicker := time.Tick(r.interval)
	pingTicker := time.Tick(time.Second * 5)

	for {
		select {
		case <-intervalTicker:
			r.send()
		case <-pingTicker:
			_, err := r.client.Health(context.Background())
			if err != nil {
				log.Warn("Got error from influxdb client health check", "err", err.Error())
			}
		}
	}
}

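// send converts every metric in the registry into an InfluxDB point tagged
// with the reporter's tags, writes it through the async write API and flushes
// the buffered points at the end.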
func (r *v2Reporter) send() {
	r.reg.Each(func(name string, i interface{}) {
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {

		case metrics.Counter:
			// Report the delta since the previous report; the cache holds the
			// count that was last seen for this counter.
			v := metric.Count()
			l := r.cache[name]

			measurement := fmt.Sprintf("%s%s.count", namespace, name)
			fields := map[string]interface{}{
				"value": v - l,
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

			r.cache[name] = v

		case metrics.Gauge:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.GaugeFloat64:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
			fields := map[string]interface{}{
				"value": ms.Value(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Histogram:
			ms := metric.Snapshot()

			if ms.Count() > 0 {
				ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
				measurement := fmt.Sprintf("%s%s.histogram", namespace, name)
				fields := map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}

		case metrics.Meter:
			ms := metric.Snapshot()

			measurement := fmt.Sprintf("%s%s.meter", namespace, name)
			fields := map[string]interface{}{
				"count": ms.Count(),
				"m1":    ms.Rate1(),
				"m5":    ms.Rate5(),
				"m15":   ms.Rate15(),
				"mean":  ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.Timer:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})

			measurement := fmt.Sprintf("%s%s.timer", namespace, name)
			fields := map[string]interface{}{
				"count":    ms.Count(),
				"max":      ms.Max(),
				"mean":     ms.Mean(),
				"min":      ms.Min(),
				"stddev":   ms.StdDev(),
				"variance": ms.Variance(),
				"p50":      ps[0],
				"p75":      ps[1],
				"p95":      ps[2],
				"p99":      ps[3],
				"p999":     ps[4],
				"p9999":    ps[5],
				"m1":       ms.Rate1(),
				"m5":       ms.Rate5(),
				"m15":      ms.Rate15(),
				"meanrate": ms.RateMean(),
			}

			pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
			r.write.WritePoint(pt)

		case metrics.ResettingTimer:
			t := metric.Snapshot()

			if len(t.Values()) > 0 {
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()

				measurement := fmt.Sprintf("%s%s.span", namespace, name)
				fields := map[string]interface{}{
					"count": len(val),
					"max":   val[len(val)-1],
					"mean":  t.Mean(),
					"min":   val[0],
					"p50":   ps[0],
					"p95":   ps[1],
					"p99":   ps[2],
				}

				pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
				r.write.WritePoint(pt)
			}
		}
	})

	// Force all unwritten data to be sent
	r.write.Flush()
}