github.com/yinchengtsinghua/golang-Eos-dpos-Ethereum@v0.0.0-20190121132951-92cc4225ed8e/metrics/influxdb/influxdb.go

package influxdb

import (
	"fmt"
	uurl "net/url"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/influxdata/influxdb/client"
)

// reporter periodically flushes the metrics of a registry to InfluxDB.
type reporter struct {
	reg      metrics.Registry
	interval time.Duration

	url       uurl.URL
	database  string
	username  string
	password  string
	namespace string
	tags      map[string]string

	client *client.Client

	// cache holds the previously reported counter values so that only
	// deltas are written on each interval.
	cache map[string]int64
}

// InfluxDB starts an InfluxDB reporter which will post metrics from the given
// metrics.Registry at each d interval.
func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
	InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
}

// InfluxDBWithTags starts an InfluxDB reporter which will post metrics from the
// given metrics.Registry at each d interval with the specified tags.
func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
	u, err := uurl.Parse(url)
	if err != nil {
		log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
		return
	}

	rep := &reporter{
		reg:       r,
		interval:  d,
		url:       *u,
		database:  database,
		username:  username,
		password:  password,
		namespace: namespace,
		tags:      tags,
		cache:     make(map[string]int64),
	}
	if err := rep.makeClient(); err != nil {
		log.Warn("Unable to make InfluxDB client", "err", err)
		return
	}

	rep.run()
}

// makeClient (re)creates the InfluxDB HTTP client from the stored configuration.
func (r *reporter) makeClient() (err error) {
	r.client, err = client.NewClient(client.Config{
		URL:      r.url,
		Username: r.username,
		Password: r.password,
	})

	return
}

// run is the reporter loop: it flushes the registry every interval and pings
// InfluxDB every 5 seconds, recreating the client if the ping fails.
func (r *reporter) run() {
	intervalTicker := time.Tick(r.interval)
	pingTicker := time.Tick(time.Second * 5)

	for {
		select {
		case <-intervalTicker:
			if err := r.send(); err != nil {
				log.Warn("Unable to send to InfluxDB", "err", err)
			}
		case <-pingTicker:
			_, _, err := r.client.Ping()
			if err != nil {
				log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)

				if err = r.makeClient(); err != nil {
					log.Warn("Unable to make InfluxDB client", "err", err)
				}
			}
		}
	}
}

// send converts every metric in the registry into InfluxDB points and writes
// them as a single batch.
func (r *reporter) send() error {
	var pts []client.Point

	r.reg.Each(func(name string, i interface{}) {
		now := time.Now()
		namespace := r.namespace

		switch metric := i.(type) {
		case metrics.Counter:
			v := metric.Count()
			l := r.cache[name]
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.count", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": v - l,
				},
				Time: now,
			})
			r.cache[name] = v
		case metrics.Gauge:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": ms.Value(),
				},
				Time: now,
			})
		case metrics.GaugeFloat64:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"value": ms.Value(),
				},
				Time: now,
			})
		case metrics.Histogram:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
				},
				Time: now,
			})
		case metrics.Meter:
			ms := metric.Snapshot()
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.meter", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count": ms.Count(),
					"m1":    ms.Rate1(),
					"m5":    ms.Rate5(),
					"m15":   ms.Rate15(),
					"mean":  ms.RateMean(),
				},
				Time: now,
			})
		case metrics.Timer:
			ms := metric.Snapshot()
			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
			pts = append(pts, client.Point{
				Measurement: fmt.Sprintf("%s%s.timer", namespace, name),
				Tags:        r.tags,
				Fields: map[string]interface{}{
					"count":    ms.Count(),
					"max":      ms.Max(),
					"mean":     ms.Mean(),
					"min":      ms.Min(),
					"stddev":   ms.StdDev(),
					"variance": ms.Variance(),
					"p50":      ps[0],
					"p75":      ps[1],
					"p95":      ps[2],
					"p99":      ps[3],
					"p999":     ps[4],
					"p9999":    ps[5],
					"m1":       ms.Rate1(),
					"m5":       ms.Rate5(),
					"m15":      ms.Rate15(),
					"meanrate": ms.RateMean(),
				},
				Time: now,
			})
		case metrics.ResettingTimer:
			t := metric.Snapshot()

			if len(t.Values()) > 0 {
				ps := t.Percentiles([]float64{50, 95, 99})
				// Percentiles sorts the snapshot's values in place, so the
				// first and last elements of val are the min and max.
				val := t.Values()
				pts = append(pts, client.Point{
					Measurement: fmt.Sprintf("%s%s.span", namespace, name),
					Tags:        r.tags,
					Fields: map[string]interface{}{
						"count": len(val),
						"max":   val[len(val)-1],
						"mean":  t.Mean(),
						"min":   val[0],
						"p50":   ps[0],
						"p95":   ps[1],
						"p99":   ps[2],
					},
					Time: now,
				})
			}
		}
	})

	bps := client.BatchPoints{
		Points:   pts,
		Database: r.database,
	}

	_, err := r.client.Write(bps)
	return err
}
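
// startReporterSketch is a minimal usage sketch, not part of the reporter
// itself: it shows one plausible way to start the reporter against a local
// InfluxDB. The function name, endpoint, database, credentials, interval,
// namespace prefix and tags below are illustrative assumptions, not values
// required by this package. InfluxDBWithTags blocks in its reporting loop,
// so it is launched on its own goroutine.
func startReporterSketch() {
	go InfluxDBWithTags(
		metrics.DefaultRegistry, // registry whose metrics are reported
		10*time.Second,          // flush interval d
		"http://localhost:8086", // assumed InfluxDB HTTP endpoint
		"metrics",               // assumed target database
		"user",                  // assumed username
		"password",              // assumed password
		"node1.",                // namespace prefix prepended to measurement names
		map[string]string{"host": "node1"}, // extra tags attached to every point
	)
}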