github.com/m3db/m3@v1.5.0/src/query/api/v1/handler/prometheus/native/read_common.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package native

import (
	"context"
	"math"
	"net/http"

	"github.com/m3db/m3/src/query/api/v1/handler/prometheus"
	"github.com/m3db/m3/src/query/api/v1/options"
	"github.com/m3db/m3/src/query/block"
	"github.com/m3db/m3/src/query/executor"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/parser/promql"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/ts"
	xerrors "github.com/m3db/m3/src/x/errors"
	xhttp "github.com/m3db/m3/src/x/net/http"
	xopentracing "github.com/m3db/m3/src/x/opentracing"

	opentracinglog "github.com/opentracing/opentracing-go/log"
	"github.com/uber-go/tally"
)

type promReadMetrics struct {
	fetchSuccess      tally.Counter
	fetchErrorsServer tally.Counter
	fetchErrorsClient tally.Counter
	fetchTimerSuccess tally.Timer

	returnedDataMetrics PromReadReturnedDataMetrics
}

// PromReadReturnedDataMetrics are metrics on the data returned from prom reads.
type PromReadReturnedDataMetrics struct {
	FetchSeries     tally.Histogram
	FetchDatapoints tally.Histogram
}

func newPromReadMetrics(scope tally.Scope) promReadMetrics {
	return promReadMetrics{
		fetchSuccess: scope.Counter("fetch.success"),
		fetchErrorsServer: scope.Tagged(map[string]string{"code": "5XX"}).
			Counter("fetch.errors"),
		fetchErrorsClient: scope.Tagged(map[string]string{"code": "4XX"}).
			Counter("fetch.errors"),
		fetchTimerSuccess:   scope.Timer("fetch.success.latency"),
		returnedDataMetrics: NewPromReadReturnedDataMetrics(scope),
	}
}

// NewPromReadReturnedDataMetrics returns metrics for returned data.
func NewPromReadReturnedDataMetrics(scope tally.Scope) PromReadReturnedDataMetrics {
	seriesBuckets := append(tally.ValueBuckets{0}, tally.MustMakeExponentialValueBuckets(1, 2, 16)...)
	datapointBuckets := append(tally.ValueBuckets{0}, tally.MustMakeExponentialValueBuckets(100, 2, 16)...)
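	// Editor's note: both histograms share a zero bucket followed by
	// exponentially growing buckets (powers of two starting at 1 for series
	// counts and starting at 100 for datapoint counts).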
	return PromReadReturnedDataMetrics{
		FetchSeries:     scope.Histogram("fetch.series", seriesBuckets),
		FetchDatapoints: scope.Histogram("fetch.datapoints", datapointBuckets),
	}
}

func (m *promReadMetrics) incError(err error) {
	if xhttp.IsClientError(err) {
		m.fetchErrorsClient.Inc(1)
	} else {
		m.fetchErrorsServer.Inc(1)
	}
}

// ReadResponse is the response that gets returned to the user.
type ReadResponse struct {
	Results []ts.Series `json:"results,omitempty"`
}

// ReadResult is a result from a remote read.
type ReadResult struct {
	Series    []*ts.Series
	Meta      block.ResultMetadata
	BlockType block.BlockType
}

// ParseRequest parses the given request.
func ParseRequest(
	ctx context.Context,
	r *http.Request,
	instantaneous bool,
	opts options.HandlerOptions,
) (context.Context, ParsedOptions, error) {
	ctx, parsed, err := parseRequest(ctx, r, instantaneous, opts)
	if err != nil {
		// All parsing of requests should result in an invalid params error.
		return nil, ParsedOptions{}, xerrors.NewInvalidParamsError(err)
	}
	return ctx, parsed, nil
}

func parseRequest(
	ctx context.Context,
	r *http.Request,
	instantaneous bool,
	opts options.HandlerOptions,
) (context.Context, ParsedOptions, error) {
	ctx, fetchOpts, err := opts.FetchOptionsBuilder().NewFetchOptions(ctx, r)
	if err != nil {
		return nil, ParsedOptions{}, err
	}

	queryOpts := &executor.QueryOptions{
		QueryContextOptions: models.QueryContextOptions{
			LimitMaxTimeseries:             fetchOpts.SeriesLimit,
			LimitMaxDocs:                   fetchOpts.DocsLimit,
			LimitMaxReturnedSeries:         fetchOpts.ReturnedSeriesLimit,
			LimitMaxReturnedDatapoints:     fetchOpts.ReturnedDatapointsLimit,
			LimitMaxReturnedSeriesMetadata: fetchOpts.ReturnedSeriesMetadataLimit,
			Instantaneous:                  instantaneous,
		},
	}

	restrictOpts := fetchOpts.RestrictQueryOptions.GetRestrictByType()
	if restrictOpts != nil {
		restrict := &models.RestrictFetchTypeQueryContextOptions{
			MetricsType:   uint(restrictOpts.MetricsType),
			StoragePolicy: restrictOpts.StoragePolicy,
		}

		queryOpts.QueryContextOptions.RestrictFetchType = restrict
	}

	var (
		engine = opts.Engine()
		params models.RequestParams
	)
	if instantaneous {
		params, err = parseInstantaneousParams(r, engine.Options(), fetchOpts)
	} else {
		params, err = parseParams(r, engine.Options(), fetchOpts)
	}
	if err != nil {
		return nil, ParsedOptions{}, err
	}

	return ctx, ParsedOptions{
		QueryOpts: queryOpts,
		FetchOpts: fetchOpts,
		Params:    params,
	}, nil
}

// ParsedOptions are parsed options for the query.
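// QueryOpts carries executor-level limits and query context options,
// FetchOpts carries the storage fetch options, and Params holds the parsed
// PromQL query and its time parameters.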
type ParsedOptions struct {
	QueryOpts *executor.QueryOptions
	FetchOpts *storage.FetchOptions
	Params    models.RequestParams
}

func read(
	ctx context.Context,
	parsed ParsedOptions,
	handlerOpts options.HandlerOptions,
) (ReadResult, error) {
	var (
		opts      = parsed.QueryOpts
		fetchOpts = parsed.FetchOpts
		params    = parsed.Params

		tagOpts = handlerOpts.TagOptions()
		engine  = handlerOpts.Engine()
	)
	sp := xopentracing.SpanFromContextOrNoop(ctx)
	sp.LogFields(
		opentracinglog.String("params.query", params.Query),
		xopentracing.Time("params.start", params.Start.ToTime()),
		xopentracing.Time("params.end", params.End.ToTime()),
		xopentracing.Time("params.now", params.Now),
		xopentracing.Duration("params.step", params.Step),
	)

	emptyResult := ReadResult{
		Meta:      block.NewResultMetadata(),
		BlockType: block.BlockEmpty,
	}

	// TODO: Capture timing
	parseOpts := engine.Options().ParseOptions()
	parser, err := promql.Parse(params.Query, params.Step, tagOpts, parseOpts)
	if err != nil {
		return emptyResult, xerrors.NewInvalidParamsError(err)
	}

	bl, err := engine.ExecuteExpr(ctx, parser, opts, fetchOpts, params)
	if err != nil {
		return emptyResult, err
	}

	resultMeta := bl.Meta().ResultMetadata
	it, err := bl.StepIter()
	if err != nil {
		return emptyResult, err
	}

	seriesMeta := it.SeriesMeta()
	numSeries := len(seriesMeta)

	bounds := bl.Meta().Bounds
	// Initialize data slices.
	data := make([]ts.FixedResolutionMutableValues, 0, numSeries)
	for i := 0; i < numSeries; i++ {
		data = append(data, ts.NewFixedStepValues(bounds.StepSize, bounds.Steps(),
			math.NaN(), bounds.Start))
	}

	stepIndex := 0
	for it.Next() {
		step := it.Current()
		for seriesIndex, v := range step.Values() {
			mutableValuesForSeries := data[seriesIndex]
			mutableValuesForSeries.SetValueAt(stepIndex, v)
		}

		stepIndex++
	}

	if err := it.Err(); err != nil {
		return emptyResult, err
	}

	seriesList := make([]*ts.Series, 0, len(data))
	for i, values := range data {
		var (
			meta   = seriesMeta[i]
			tags   = meta.Tags.AddTags(bl.Meta().Tags.Tags)
			series = ts.NewSeries(meta.Name, values, tags)
		)

		seriesList = append(seriesList, series)
	}

	if err := bl.Close(); err != nil {
		return emptyResult, err
	}

	seriesList = prometheus.FilterSeriesByOptions(seriesList, fetchOpts)

	blockType := bl.Info().Type()

	return ReadResult{
		Series:    seriesList,
		Meta:      resultMeta,
		BlockType: blockType,
	}, nil
}

// ReturnedDataLimited describes the data returned by a query and whether it
// was truncated by the returned series or datapoint limits.
type ReturnedDataLimited struct {
	Series     int
	Datapoints int

	// TotalSeries is the total number of series, which may be >= Series.
	// Truncation happens at the series level to avoid presenting partial
	// series, so this value is useful for indicating how many series would
	// have been rendered without limiting either series or datapoints.
	TotalSeries int

	// Limited signals that the results returned were
	// limited by either series or datapoint limits.
	Limited bool
}
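// exampleReadHandler is an editor's illustrative sketch, not part of the
// original file: it shows one way ParseRequest and read could be composed
// inside an HTTP handler. The function name and the plain-text error
// responses are assumptions for illustration only; the real handlers in this
// package render errors and results differently.
func exampleReadHandler(handlerOpts options.HandlerOptions) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Parse fetch options, query options and PromQL params from the
		// request; instantaneous=false selects range-query parsing.
		ctx, parsed, err := ParseRequest(r.Context(), r, false, handlerOpts)
		if err != nil {
			// Parse failures are invalid-params errors and map to 4XX.
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		// Execute the query and collect the resulting series.
		result, err := read(ctx, parsed, handlerOpts)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		// result.Series, result.Meta and result.BlockType would be rendered
		// to the response here.
		_ = result
	}
}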