github.com/m3db/m3@v1.5.0/src/query/api/v1/handler/prometheus/remote/read.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package remote

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "strings"
    "sync"
    "time"

    comparator "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
    "github.com/m3db/m3/src/query/api/v1/handler/prometheus"
    "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
    "github.com/m3db/m3/src/query/api/v1/options"
    "github.com/m3db/m3/src/query/api/v1/route"
    "github.com/m3db/m3/src/query/block"
    "github.com/m3db/m3/src/query/executor"
    "github.com/m3db/m3/src/query/generated/proto/prompb"
    "github.com/m3db/m3/src/query/models"
    xpromql "github.com/m3db/m3/src/query/parser/promql"
    "github.com/m3db/m3/src/query/storage"
    "github.com/m3db/m3/src/query/ts"
    "github.com/m3db/m3/src/query/util"
    "github.com/m3db/m3/src/query/util/logging"
    xerrors "github.com/m3db/m3/src/x/errors"
    xhttp "github.com/m3db/m3/src/x/net/http"
    xtime "github.com/m3db/m3/src/x/time"

    "github.com/golang/protobuf/proto"
    "github.com/golang/snappy"
    "github.com/prometheus/prometheus/pkg/labels"
    promql "github.com/prometheus/prometheus/promql/parser"
    "github.com/uber-go/tally"
    "go.uber.org/zap"
)

const (
    // PromReadURL is the URL for the remote prom read handler.
    PromReadURL = route.Prefix + "/prom/remote/read"
)

// PromReadHTTPMethods are the HTTP methods used with this resource.
var PromReadHTTPMethods = []string{http.MethodPost, http.MethodGet}

// promReadHandler is a handler for the prometheus remote read endpoint.
type promReadHandler struct {
    promReadMetrics promReadMetrics
    opts            options.HandlerOptions
}

// NewPromReadHandler returns a new instance of the handler.
func NewPromReadHandler(opts options.HandlerOptions) http.Handler {
    taggedScope := opts.InstrumentOpts().MetricsScope().
        Tagged(map[string]string{"handler": "remote-read"})
    return &promReadHandler{
        promReadMetrics: newPromReadMetrics(taggedScope),
        opts:            opts,
    }
}
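
// A minimal usage sketch for wiring this handler up (assuming a plain
// net/http mux; construction of the options.HandlerOptions value is elided
// and the variable names are placeholders, not part of this package):
//
//	var opts options.HandlerOptions // assembled elsewhere by the caller
//	mux := http.NewServeMux()
//	mux.Handle(PromReadURL, NewPromReadHandler(opts))
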
type promReadMetrics struct {
    fetchSuccess      tally.Counter
    fetchErrorsServer tally.Counter
    fetchErrorsClient tally.Counter
    fetchTimerSuccess tally.Timer
}

func newPromReadMetrics(scope tally.Scope) promReadMetrics {
    return promReadMetrics{
        fetchSuccess: scope.
            Counter("fetch.success"),
        fetchErrorsServer: scope.Tagged(map[string]string{"code": "5XX"}).
            Counter("fetch.errors"),
        fetchErrorsClient: scope.Tagged(map[string]string{"code": "4XX"}).
            Counter("fetch.errors"),
        fetchTimerSuccess: scope.Timer("fetch.success.latency"),
    }
}

func (m *promReadMetrics) incError(err error) {
    if xhttp.IsClientError(err) {
        m.fetchErrorsClient.Inc(1)
    } else {
        m.fetchErrorsServer.Inc(1)
    }
}

func (h *promReadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    timer := h.promReadMetrics.fetchTimerSuccess.Start()
    defer timer.Stop()

    logger := logging.WithContext(r.Context(), h.opts.InstrumentOpts())
    ctx, req, fetchOpts, rErr := ParseRequest(r.Context(), r, h.opts)
    if rErr != nil {
        h.promReadMetrics.incError(rErr)
        logger.Error("remote read query parse error",
            zap.Error(rErr),
            zap.Any("req", req),
            zap.Any("fetchOpts", fetchOpts))
        xhttp.WriteError(w, rErr)
        return
    }

    readResult, err := Read(ctx, req, fetchOpts, h.opts)
    if err != nil {
        h.promReadMetrics.incError(err)
        logger.Error("remote read query error",
            zap.Error(err),
            zap.Any("req", req),
            zap.Any("fetchOpts", fetchOpts))
        xhttp.WriteError(w, err)
        return
    }

    // Write headers before response.
    err = handleroptions.AddDBResultResponseHeaders(w, readResult.Meta, fetchOpts)
    if err != nil {
        h.promReadMetrics.incError(err)
        logger.Error("remote read query write response header error",
            zap.Error(err),
            zap.Any("req", req),
            zap.Any("fetchOpts", fetchOpts))
        xhttp.WriteError(w, err)
        return
    }
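
    // The response encoding is chosen by the "format" query parameter:
    // "json" renders the comparator JSON structures defined below, while any
    // other value falls through to the standard snappy-compressed protobuf
    // body. An illustrative request (the parameter values are made up):
    //
    //	GET <PromReadURL>?query=up&start=1600000000&end=1600000060&format=json
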
    // NB: if this errors, all relevant headers and information should already
    // be sent to the writer; so it is not necessary to do anything here other
    // than increment success/failure metrics.
    switch r.FormValue("format") {
    case "json":
        result := readResultsJSON{
            Queries: make([]queryResultsJSON, 0, len(req.Queries)),
        }
        for i, q := range req.Queries {
            start := storage.PromTimestampToTime(q.StartTimestampMs)
            end := storage.PromTimestampToTime(q.EndTimestampMs)

            all := readResult.Result[i].Timeseries
            timeseries := make([]comparator.Series, 0, len(all))
            for _, s := range all {
                datapoints := storage.PromSamplesToM3Datapoints(s.Samples)
                tags := storage.PromLabelsToM3Tags(s.Labels, h.opts.TagOptions())
                series := toSeries(datapoints, tags)
                series.Start = start
                series.End = end
                timeseries = append(timeseries, series)
            }

            matchers := make([]labelMatcherJSON, 0, len(q.Matchers))
            for _, m := range q.Matchers {
                matcher := labelMatcherJSON{
                    Type:  m.Type.String(),
                    Name:  string(m.Name),
                    Value: string(m.Value),
                }
                matchers = append(matchers, matcher)
            }

            result.Queries = append(result.Queries, queryResultsJSON{
                Query: queryJSON{
                    Matchers: matchers,
                },
                Start:  start,
                End:    end,
                Series: timeseries,
            })
        }

        w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
        err = json.NewEncoder(w).Encode(result)
    default:
        err = WriteSnappyCompressed(w, readResult, logger)
    }

    if err != nil {
        h.promReadMetrics.incError(err)
    } else {
        h.promReadMetrics.fetchSuccess.Inc(1)
    }
}

type readResultsJSON struct {
    Queries []queryResultsJSON `json:"queries"`
}

type queryResultsJSON struct {
    Query  queryJSON           `json:"query"`
    Start  time.Time           `json:"start"`
    End    time.Time           `json:"end"`
    Series []comparator.Series `json:"series"`
}

type queryJSON struct {
    Matchers []labelMatcherJSON `json:"matchers"`
}

type labelMatcherJSON struct {
    Type  string `json:"type"`
    Name  string `json:"name"`
    Value string `json:"value"`
}

// WriteSnappyCompressed writes snappy compressed results to the given writer.
func WriteSnappyCompressed(
    w http.ResponseWriter,
    readResult ReadResult,
    logger *zap.Logger,
) error {
    resp := &prompb.ReadResponse{
        Results: readResult.Result,
    }

    data, err := proto.Marshal(resp)
    if err != nil {
        logger.Error("unable to marshal read results to protobuf", zap.Error(err))
        xhttp.WriteError(w, err)
        return err
    }

    w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeProtobuf)
    w.Header().Set("Content-Encoding", "snappy")

    compressed := snappy.Encode(nil, data)
    // NB: assign to the outer err rather than shadowing it, so that a failed
    // write is propagated to the caller and counted as an error.
    if _, err = w.Write(compressed); err != nil {
        logger.Error("unable to write snappy compressed read results",
            zap.Error(err))
        xhttp.WriteError(w, err)
    }

    return err
}

func parseCompressedRequest(
    r *http.Request,
) (*prompb.ReadRequest, error) {
    result, err := prometheus.ParsePromCompressedRequest(r)
    if err != nil {
        return nil, err
    }

    var req prompb.ReadRequest
    if err := proto.Unmarshal(result.UncompressedBody, &req); err != nil {
        return nil, xerrors.NewInvalidParamsError(err)
    }

    return &req, nil
}

// ReadResult is a read result.
type ReadResult struct {
    Meta   block.ResultMetadata
    Result []*prompb.QueryResult
}

// ParseExpr parses a prometheus request expression into the constituent
// fetches, rather than the full query application.
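//
// For example (illustrative values), a GET whose "query" parameter is
// "up[5m]" yields a single prompb.Query with a matcher on __name__="up"
// and a start timestamp pulled back by the 5m matrix range.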
func ParseExpr(
    r *http.Request,
    opts xpromql.ParseOptions,
) (*prompb.ReadRequest, error) {
    expr, err := parseExpr(r, opts)
    if err != nil {
        // Always an invalid request if parsing the params fails.
        return nil, xerrors.NewInvalidParamsError(err)
    }
    return expr, nil
}

func parseExpr(
    r *http.Request,
    opts xpromql.ParseOptions,
) (*prompb.ReadRequest, error) {
    var req *prompb.ReadRequest
    exprParam := strings.TrimSpace(r.FormValue("query"))
    if len(exprParam) == 0 {
        return nil, fmt.Errorf("cannot parse params: no expr")
    }

    queryStart, err := util.ParseTimeString(r.FormValue("start"))
    if err != nil {
        return nil, err
    }

    queryEnd, err := util.ParseTimeString(r.FormValue("end"))
    if err != nil {
        return nil, err
    }

    fn := opts.ParseFn()
    req = &prompb.ReadRequest{}
    expr, err := fn(exprParam)
    if err != nil {
        return nil, err
    }

    var vectorsInspected []*promql.VectorSelector
    promql.Inspect(expr, func(node promql.Node, path []promql.Node) error {
        var (
            start         = xtime.ToUnixNano(queryStart)
            end           = xtime.ToUnixNano(queryEnd)
            offset        time.Duration
            labelMatchers []*labels.Matcher
        )

        if n, ok := node.(*promql.MatrixSelector); ok {
            if n.Range > 0 {
                start = start.Add(-1 * n.Range)
            }

            vectorSelector := n.VectorSelector.(*promql.VectorSelector)

            // Check already inspected (matrix can be walked further into
            // child vector selector).
            for _, existing := range vectorsInspected {
                if existing == vectorSelector {
                    return nil // Already inspected.
                }
            }

            vectorsInspected = append(vectorsInspected, vectorSelector)

            offset = vectorSelector.OriginalOffset
            labelMatchers = vectorSelector.LabelMatchers
        } else if n, ok := node.(*promql.VectorSelector); ok {
            // Check already inspected (matrix can be walked further into
            // child vector selector).
            for _, existing := range vectorsInspected {
                if existing == n {
                    return nil // Already inspected.
                }
            }

            vectorsInspected = append(vectorsInspected, n)

            offset = n.OriginalOffset
            labelMatchers = n.LabelMatchers
        } else {
            return nil
        }

        if offset > 0 {
            start = start.Add(-1 * offset)
            end = end.Add(-1 * offset)
        }

        matchers, err := toLabelMatchers(labelMatchers)
        if err != nil {
            return err
        }

        query := &prompb.Query{
            StartTimestampMs: storage.TimeToPromTimestamp(start),
            EndTimestampMs:   storage.TimeToPromTimestamp(end),
            Matchers:         matchers,
        }

        req.Queries = append(req.Queries, query)
        return nil
    })

    return req, nil
}
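
// A worked sketch of the conversion above (the expression and handler
// options are hypothetical; the parse options come from the engine, as in
// parseRequest below):
//
//	parseOpts := opts.Engine().Options().ParseOptions()
//	// A "query" param of "rate(http_requests_total[5m] offset 1m)" yields
//	// one prompb.Query covering [start-5m-1m, end-1m] with a matcher on
//	// __name__="http_requests_total".
//	req, err := ParseExpr(r, parseOpts)
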
// ParseRequest parses the read request, either from a snappy-compressed
// protobuf body or from a PromQL expression in the query parameters.
func ParseRequest(
    ctx context.Context,
    r *http.Request,
    opts options.HandlerOptions,
) (context.Context, *prompb.ReadRequest, *storage.FetchOptions, error) {
    ctx, req, fetchOpts, err := parseRequest(ctx, r, opts)
    if err != nil {
        // Always an invalid request if parsing the params fails.
        return nil, nil, nil, xerrors.NewInvalidParamsError(err)
    }
    return ctx, req, fetchOpts, nil
}

func parseRequest(
    ctx context.Context,
    r *http.Request,
    opts options.HandlerOptions,
) (context.Context, *prompb.ReadRequest, *storage.FetchOptions, error) {
    var (
        req *prompb.ReadRequest
        err error
    )
    switch {
    case r.Method == http.MethodGet && strings.TrimSpace(r.FormValue("query")) != "":
        req, err = ParseExpr(r, opts.Engine().Options().ParseOptions())
    default:
        req, err = parseCompressedRequest(r)
    }
    if err != nil {
        return nil, nil, nil, err
    }

    ctx, fetchOpts, rErr := opts.FetchOptionsBuilder().NewFetchOptions(ctx, r)
    if rErr != nil {
        return nil, nil, nil, rErr
    }

    return ctx, req, fetchOpts, nil
}

// Read performs a remote read on the given engine.
func Read(
    ctx context.Context,
    r *prompb.ReadRequest,
    fetchOpts *storage.FetchOptions,
    opts options.HandlerOptions,
) (ReadResult, error) {
    var (
        queryCount   = len(r.Queries)
        cancelFuncs  = make([]context.CancelFunc, queryCount)
        queryResults = make([]*prompb.QueryResult, queryCount)
        meta         = block.NewResultMetadata()
        queryOpts    = &executor.QueryOptions{
            QueryContextOptions: models.QueryContextOptions{
                LimitMaxTimeseries:             fetchOpts.SeriesLimit,
                LimitMaxDocs:                   fetchOpts.DocsLimit,
                LimitMaxReturnedSeries:         fetchOpts.ReturnedSeriesLimit,
                LimitMaxReturnedDatapoints:     fetchOpts.ReturnedDatapointsLimit,
                LimitMaxReturnedSeriesMetadata: fetchOpts.ReturnedSeriesMetadataLimit,
            },
        }

        engine = opts.Engine()

        wg       sync.WaitGroup
        mu       sync.Mutex
        multiErr xerrors.MultiError
    )

    wg.Add(queryCount)
    for i, promQuery := range r.Queries {
        i, promQuery := i, promQuery // Capture loop vars for the goroutine.
        go func() {
            ctx, cancel := context.WithTimeout(ctx, fetchOpts.Timeout)
            defer func() {
                wg.Done()
                cancel()
            }()

            cancelFuncs[i] = cancel
            query, err := storage.PromReadQueryToM3(promQuery)
            if err != nil {
                mu.Lock()
                multiErr = multiErr.Add(err)
                mu.Unlock()
                return
            }

            result, err := engine.ExecuteProm(ctx, query, queryOpts, fetchOpts)
            if err != nil {
                mu.Lock()
                multiErr = multiErr.Add(err)
                mu.Unlock()
                return
            }

            result.PromResult.Timeseries = filterResults(
                result.PromResult.GetTimeseries(), fetchOpts)
            mu.Lock()
            queryResults[i] = result.PromResult
            meta = meta.CombineMetadata(result.Metadata)
            mu.Unlock()
        }()
    }

    wg.Wait()
    for _, cancel := range cancelFuncs {
        cancel()
    }

    if err := multiErr.FinalError(); err != nil {
        return ReadResult{Result: nil, Meta: meta}, err
    }

    return ReadResult{Result: queryResults, Meta: meta}, nil
}

// filterResults removes series tags based on options.
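//
// For example, with a RestrictByTag filter naming "instance", a series
// labelled {__name__="up", instance="a"} is returned as {__name__="up"}.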
func filterResults(
    series []*prompb.TimeSeries,
    opts *storage.FetchOptions,
) []*prompb.TimeSeries {
    if opts == nil {
        return series
    }

    keys := opts.RestrictQueryOptions.GetRestrictByTag().GetFilterByNames()
    if len(keys) == 0 {
        return series
    }

    for i, s := range series {
        series[i].Labels = filterLabels(s.Labels, keys)
    }

    return series
}

func filterLabels(
    labels []prompb.Label,
    filtering [][]byte,
) []prompb.Label {
    if len(filtering) == 0 {
        return labels
    }

    filtered := labels[:0]
    for _, l := range labels {
        skip := false
        for _, f := range filtering {
            if bytes.Equal(l.GetName(), f) {
                skip = true
                break
            }
        }

        if skip {
            continue
        }

        filtered = append(filtered, l)
    }

    return filtered
}

func tagsConvert(ts models.Tags) comparator.Tags {
    tags := make(comparator.Tags, 0, ts.Len())
    for _, t := range ts.Tags {
        tags = append(tags, comparator.NewTag(string(t.Name), string(t.Value)))
    }

    return tags
}

func datapointsConvert(dps ts.Datapoints) comparator.Datapoints {
    datapoints := make(comparator.Datapoints, 0, dps.Len())
    for _, dp := range dps.Datapoints() {
        val := comparator.Datapoint{
            Value:     comparator.Value(dp.Value),
            Timestamp: dp.Timestamp.ToTime(),
        }
        datapoints = append(datapoints, val)
    }

    return datapoints
}

func toSeries(dps ts.Datapoints, tags models.Tags) comparator.Series {
    return comparator.Series{
        Tags:       tagsConvert(tags),
        Datapoints: datapointsConvert(dps),
    }
}

func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) {
    pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers))
    for _, m := range matchers {
        var mType prompb.LabelMatcher_Type
        switch m.Type {
        case labels.MatchEqual:
            mType = prompb.LabelMatcher_EQ
        case labels.MatchNotEqual:
            mType = prompb.LabelMatcher_NEQ
        case labels.MatchRegexp:
            mType = prompb.LabelMatcher_RE
        case labels.MatchNotRegexp:
            mType = prompb.LabelMatcher_NRE
        default:
            return nil, errors.New("invalid matcher type")
        }
        pbMatchers = append(pbMatchers, &prompb.LabelMatcher{
            Type:  mType,
            Name:  []byte(m.Name),
            Value: []byte(m.Value),
        })
    }
    return pbMatchers, nil
}
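
// For reference, toLabelMatchers maps the four PromQL matcher kinds onto the
// remote read protobuf enum. A minimal sketch (not exercised anywhere in this
// package, shown for illustration only):
//
//	m := labels.MustNewMatcher(labels.MatchRegexp, "job", "api.*")
//	pb, _ := toLabelMatchers([]*labels.Matcher{m})
//	// pb[0].Type == prompb.LabelMatcher_RE, pb[0].Name == []byte("job")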