github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/metrics/influxdb/influxdb.go

//<developer>
// <name>linapex 曹一峰</name>
// <email>linapex@163.com</email>
// <wx>superexc</wx>
// <qqgroup>128148617</qqgroup>
// <url>https://jsq.ink</url>
// <role>pku engineer</role>
// <date>2019-03-16 19:16:40</date>
//</624450099094753280>

package influxdb

import (
	"fmt"
	uurl "net/url"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/influxdata/influxdb/client"
)

type reporter struct {
	reg      metrics.Registry
	interval time.Duration

	url       uurl.URL
	database  string
	username  string
	password  string
	namespace string
	tags      map[string]string

	client *client.Client

	cache map[string]int64
}

// InfluxDB starts an InfluxDB reporter which will post metrics from the given
// metrics.Registry at each d interval.
func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
	InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
}

// InfluxDBWithTags starts an InfluxDB reporter which will post metrics from the
// given metrics.Registry at each d interval, with the specified tags attached
// to every point.
func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
	u, err := uurl.Parse(url)
	if err != nil {
		log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
		return
	}

	rep := &reporter{
		reg:       r,
		interval:  d,
		url:       *u,
		database:  database,
		username:  username,
		password:  password,
		namespace: namespace,
		tags:      tags,
		cache:     make(map[string]int64),
	}
	if err := rep.makeClient(); err != nil {
		log.Warn("Unable to make InfluxDB client", "err", err)
		return
	}

	rep.run()
}

// InfluxDBWithTagsOnce runs the InfluxDB reporter once, posting metrics from
// the given metrics.Registry with the specified tags.
func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
	u, err := uurl.Parse(url)
	if err != nil {
		return fmt.Errorf("Unable to parse InfluxDB. url: %s, err: %v", url, err)
	}

	rep := &reporter{
		reg:       r,
		url:       *u,
		database:  database,
		username:  username,
		password:  password,
		namespace: namespace,
		tags:      tags,
		cache:     make(map[string]int64),
	}
	if err := rep.makeClient(); err != nil {
		return fmt.Errorf("Unable to make InfluxDB client. err: %v", err)
	}

	if err := rep.send(); err != nil {
		return fmt.Errorf("Unable to send to InfluxDB. err: %v", err)
	}

	return nil
}
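// Usage sketch: a long-running node would typically start the reporter in its
// own goroutine, since InfluxDBWithTags blocks inside run(). The endpoint,
// database, namespace and tag values below are only illustrative assumptions,
// not values mandated by this package.
//
//	go influxdb.InfluxDBWithTags(
//		metrics.DefaultRegistry,             // registry to export
//		10*time.Second,                      // reporting interval d
//		"http://localhost:8086",             // InfluxDB HTTP endpoint
//		"metrics",                           // target database
//		"user", "pass",                      // credentials (may be empty)
//		"geth.",                             // namespace prefix for measurement names
//		map[string]string{"host": "node-1"}, // tags attached to every point
//	)
//
// For a one-shot export, InfluxDBWithTagsOnce posts a single batch and returns
// any error instead of logging it.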
err: %v", err) 95 } 96 97 return nil 98 } 99 100 func (r *reporter) makeClient() (err error) { 101 r.client, err = client.NewClient(client.Config{ 102 URL: r.url, 103 Username: r.username, 104 Password: r.password, 105 }) 106 107 return 108 } 109 110 func (r *reporter) run() { 111 intervalTicker := time.Tick(r.interval) 112 pingTicker := time.Tick(time.Second * 5) 113 114 for { 115 select { 116 case <-intervalTicker: 117 if err := r.send(); err != nil { 118 log.Warn("Unable to send to InfluxDB", "err", err) 119 } 120 case <-pingTicker: 121 _, _, err := r.client.Ping() 122 if err != nil { 123 log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err) 124 125 if err = r.makeClient(); err != nil { 126 log.Warn("Unable to make InfluxDB client", "err", err) 127 } 128 } 129 } 130 } 131 } 132 133 func (r *reporter) send() error { 134 var pts []client.Point 135 136 r.reg.Each(func(name string, i interface{}) { 137 now := time.Now() 138 namespace := r.namespace 139 140 switch metric := i.(type) { 141 case metrics.Counter: 142 v := metric.Count() 143 l := r.cache[name] 144 pts = append(pts, client.Point{ 145 Measurement: fmt.Sprintf("%s%s.count", namespace, name), 146 Tags: r.tags, 147 Fields: map[string]interface{}{ 148 "value": v - l, 149 }, 150 Time: now, 151 }) 152 r.cache[name] = v 153 case metrics.Gauge: 154 ms := metric.Snapshot() 155 pts = append(pts, client.Point{ 156 Measurement: fmt.Sprintf("%s%s.gauge", namespace, name), 157 Tags: r.tags, 158 Fields: map[string]interface{}{ 159 "value": ms.Value(), 160 }, 161 Time: now, 162 }) 163 case metrics.GaugeFloat64: 164 ms := metric.Snapshot() 165 pts = append(pts, client.Point{ 166 Measurement: fmt.Sprintf("%s%s.gauge", namespace, name), 167 Tags: r.tags, 168 Fields: map[string]interface{}{ 169 "value": ms.Value(), 170 }, 171 Time: now, 172 }) 173 case metrics.Histogram: 174 ms := metric.Snapshot() 175 ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}) 176 pts = append(pts, client.Point{ 177 Measurement: fmt.Sprintf("%s%s.histogram", namespace, name), 178 Tags: r.tags, 179 Fields: map[string]interface{}{ 180 "count": ms.Count(), 181 "max": ms.Max(), 182 "mean": ms.Mean(), 183 "min": ms.Min(), 184 "stddev": ms.StdDev(), 185 "variance": ms.Variance(), 186 "p50": ps[0], 187 "p75": ps[1], 188 "p95": ps[2], 189 "p99": ps[3], 190 "p999": ps[4], 191 "p9999": ps[5], 192 }, 193 Time: now, 194 }) 195 case metrics.Meter: 196 ms := metric.Snapshot() 197 pts = append(pts, client.Point{ 198 Measurement: fmt.Sprintf("%s%s.meter", namespace, name), 199 Tags: r.tags, 200 Fields: map[string]interface{}{ 201 "count": ms.Count(), 202 "m1": ms.Rate1(), 203 "m5": ms.Rate5(), 204 "m15": ms.Rate15(), 205 "mean": ms.RateMean(), 206 }, 207 Time: now, 208 }) 209 case metrics.Timer: 210 ms := metric.Snapshot() 211 ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}) 212 pts = append(pts, client.Point{ 213 Measurement: fmt.Sprintf("%s%s.timer", namespace, name), 214 Tags: r.tags, 215 Fields: map[string]interface{}{ 216 "count": ms.Count(), 217 "max": ms.Max(), 218 "mean": ms.Mean(), 219 "min": ms.Min(), 220 "stddev": ms.StdDev(), 221 "variance": ms.Variance(), 222 "p50": ps[0], 223 "p75": ps[1], 224 "p95": ps[2], 225 "p99": ps[3], 226 "p999": ps[4], 227 "p9999": ps[5], 228 "m1": ms.Rate1(), 229 "m5": ms.Rate5(), 230 "m15": ms.Rate15(), 231 "meanrate": ms.RateMean(), 232 }, 233 Time: now, 234 }) 235 case metrics.ResettingTimer: 236 t := metric.Snapshot() 237 238 if len(t.Values()) > 0 { 239 ps := 
		case metrics.ResettingTimer:
			t := metric.Snapshot()

			if len(t.Values()) > 0 {
				ps := t.Percentiles([]float64{50, 95, 99})
				val := t.Values()
				pts = append(pts, client.Point{
					Measurement: fmt.Sprintf("%s%s.span", namespace, name),
					Tags:        r.tags,
					Fields: map[string]interface{}{
						"count": len(val),
						"max":   val[len(val)-1],
						"mean":  t.Mean(),
						"min":   val[0],
						"p50":   ps[0],
						"p95":   ps[1],
						"p99":   ps[2],
					},
					Time: now,
				})
			}
		}
	})

	bps := client.BatchPoints{
		Points:   pts,
		Database: r.database,
	}

	_, err := r.client.Write(bps)
	return err
}
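// Illustration of the resulting data: with namespace "geth." and a meter
// registered as "p2p/ingress", one send produces a point roughly equivalent to
// this line-protocol entry (the tag and field values are hypothetical):
//
//	geth.p2p/ingress.meter,host=node-1 count=42i,m1=1.5,m5=0.9,m15=0.4,mean=1.1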