github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/querier/querier.go

package querier

import (
	"context"
	"flag"
	"net/http"
	"time"

	"github.com/go-kit/log/level"
	"github.com/grafana/dskit/tenant"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/weaveworks/common/httpgrpc"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc/health/grpc_health_v1"

	"github.com/grafana/loki/pkg/iter"
	"github.com/grafana/loki/pkg/loghttp"
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logql/syntax"
	"github.com/grafana/loki/pkg/storage"
	"github.com/grafana/loki/pkg/storage/stores/index/stats"
	"github.com/grafana/loki/pkg/storage/stores/indexshipper/compactor/deletion"
	listutil "github.com/grafana/loki/pkg/util"
	"github.com/grafana/loki/pkg/util/spanlogger"
	util_validation "github.com/grafana/loki/pkg/util/validation"
	"github.com/grafana/loki/pkg/validation"
)

const (
	// How long the Tailer should wait - once there are no entries to read from ingesters -
	// before checking if a new entry is available (to avoid spinning the CPU in a continuous
	// check loop)
	tailerWaitEntryThrottle = time.Second / 2
)

var nowFunc = func() time.Time { return time.Now() }

type interval struct {
	start, end time.Time
}

// Config for a querier.
type Config struct {
	QueryTimeout                  time.Duration    `yaml:"query_timeout"`
	TailMaxDuration               time.Duration    `yaml:"tail_max_duration"`
	ExtraQueryDelay               time.Duration    `yaml:"extra_query_delay,omitempty"`
	QueryIngestersWithin          time.Duration    `yaml:"query_ingesters_within,omitempty"`
	IngesterQueryStoreMaxLookback time.Duration    `yaml:"-"`
	Engine                        logql.EngineOpts `yaml:"engine,omitempty"`
	MaxConcurrent                 int              `yaml:"max_concurrent"`
	QueryStoreOnly                bool             `yaml:"query_store_only"`
	QueryIngesterOnly             bool             `yaml:"query_ingester_only"`
	MultiTenantQueriesEnabled     bool             `yaml:"multi_tenant_queries_enabled"`
}

// RegisterFlags registers flags.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
	cfg.Engine.RegisterFlagsWithPrefix("querier", f)
	f.DurationVar(&cfg.TailMaxDuration, "querier.tail-max-duration", 1*time.Hour, "Limit the duration for which live tailing request would be served")
	f.DurationVar(&cfg.QueryTimeout, "querier.query-timeout", 1*time.Minute, "Timeout when querying backends (ingesters or storage) during the execution of a query request")
	f.DurationVar(&cfg.ExtraQueryDelay, "querier.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.")
	f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 3*time.Hour, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.")
	f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 10, "The maximum number of concurrent queries.")
	f.BoolVar(&cfg.QueryStoreOnly, "querier.query-store-only", false, "Queriers should only query the store and not try to query any ingesters")
	f.BoolVar(&cfg.QueryIngesterOnly, "querier.query-ingester-only", false, "Queriers should only query the ingesters and not try to query any store")
	f.BoolVar(&cfg.MultiTenantQueriesEnabled, "querier.multi-tenant-queries-enabled", false, "Enable queries across multiple tenants. (Experimental)")
}

// Validate validates the config.
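// It currently only rejects configurations that set both
// querier.query_store_only and querier.query_ingester_only.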
func (cfg *Config) Validate() error {
	if cfg.QueryStoreOnly && cfg.QueryIngesterOnly {
		return errors.New("querier.query_store_only and querier.query_ingester_only cannot both be true")
	}
	return nil
}

// Querier can select logs and samples and handle query requests.
type Querier interface {
	logql.Querier
	Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error)
	Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error)
	Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, error)
	IndexStats(ctx context.Context, req *loghttp.RangeQuery) (*stats.Stats, error)
}

// SingleTenantQuerier handles single tenant queries.
type SingleTenantQuerier struct {
	cfg             Config
	store           storage.Store
	limits          *validation.Overrides
	ingesterQuerier *IngesterQuerier
	deleteGetter    deleteGetter
	metrics         *Metrics
}

type deleteGetter interface {
	GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]deletion.DeleteRequest, error)
}

// New makes a new Querier.
func New(cfg Config, store storage.Store, ingesterQuerier *IngesterQuerier, limits *validation.Overrides, d deleteGetter, r prometheus.Registerer) (*SingleTenantQuerier, error) {
	return &SingleTenantQuerier{
		cfg:             cfg,
		store:           store,
		ingesterQuerier: ingesterQuerier,
		limits:          limits,
		deleteGetter:    d,
		metrics:         NewMetrics(r),
	}, nil
}

// SelectLogs implements logql.Querier, selecting logs via matchers and regex filters.
func (q *SingleTenantQuerier) SelectLogs(ctx context.Context, params logql.SelectLogParams) (iter.EntryIterator, error) {
	var err error
	params.Start, params.End, err = q.validateQueryRequest(ctx, params)
	if err != nil {
		return nil, err
	}

	params.QueryRequest.Deletes, err = q.deletesForUser(ctx, params.Start, params.End)
	if err != nil {
		level.Error(spanlogger.FromContext(ctx)).Log("msg", "failed loading deletes for user", "err", err)
	}

	ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(params.Start, params.End)

	iters := []iter.EntryIterator{}
	if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil {
		// Make a copy of the request before modifying
		// because the initial request is used below to query stores
		queryRequestCopy := *params.QueryRequest
		newParams := logql.SelectLogParams{
			QueryRequest: &queryRequestCopy,
		}
		newParams.Start = ingesterQueryInterval.start
		newParams.End = ingesterQueryInterval.end
		level.Debug(spanlogger.FromContext(ctx)).Log(
			"msg", "querying ingester",
			"params", newParams)
		ingesterIters, err := q.ingesterQuerier.SelectLogs(ctx, newParams)
		if err != nil {
			return nil, err
		}

		iters = append(iters, ingesterIters...)
	}

	if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil {
		params.Start = storeQueryInterval.start
		params.End = storeQueryInterval.end
		level.Debug(spanlogger.FromContext(ctx)).Log(
			"msg", "querying store",
			"params", params)
		storeIter, err := q.store.SelectLogs(ctx, params)
		if err != nil {
			return nil, err
		}

		iters = append(iters, storeIter)
	}
	if len(iters) == 1 {
		return iters[0], nil
	}
	return iter.NewMergeEntryIterator(ctx, iters, params.Direction), nil
}

func (q *SingleTenantQuerier) SelectSamples(ctx context.Context, params logql.SelectSampleParams) (iter.SampleIterator, error) {
	var err error
	params.Start, params.End, err = q.validateQueryRequest(ctx, params)
	if err != nil {
		return nil, err
	}

	params.SampleQueryRequest.Deletes, err = q.deletesForUser(ctx, params.Start, params.End)
	if err != nil {
		level.Error(spanlogger.FromContext(ctx)).Log("msg", "failed loading deletes for user", "err", err)
	}

	ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(params.Start, params.End)

	iters := []iter.SampleIterator{}
	if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil {
		// Make a copy of the request before modifying
		// because the initial request is used below to query stores
		queryRequestCopy := *params.SampleQueryRequest
		newParams := logql.SelectSampleParams{
			SampleQueryRequest: &queryRequestCopy,
		}
		newParams.Start = ingesterQueryInterval.start
		newParams.End = ingesterQueryInterval.end

		ingesterIters, err := q.ingesterQuerier.SelectSample(ctx, newParams)
		if err != nil {
			return nil, err
		}

		iters = append(iters, ingesterIters...)
	}

	if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil {
		params.Start = storeQueryInterval.start
		params.End = storeQueryInterval.end

		storeIter, err := q.store.SelectSamples(ctx, params)
		if err != nil {
			return nil, err
		}

		iters = append(iters, storeIter)
	}
	return iter.NewMergeSampleIterator(ctx, iters), nil
}

func (q *SingleTenantQuerier) deletesForUser(ctx context.Context, startT, endT time.Time) ([]*logproto.Delete, error) {
	userID, err := tenant.TenantID(ctx)
	if err != nil {
		return nil, err
	}

	d, err := q.deleteGetter.GetAllDeleteRequestsForUser(ctx, userID)
	if err != nil {
		return nil, err
	}

	start := startT.UnixNano()
	end := endT.UnixNano()

	var deletes []*logproto.Delete
	for _, del := range d {
		if del.StartTime.UnixNano() <= end && del.EndTime.UnixNano() >= start {
			deletes = append(deletes, &logproto.Delete{
				Selector: del.Query,
				Start:    del.StartTime.UnixNano(),
				End:      del.EndTime.UnixNano(),
			})
		}
	}

	return deletes, nil
}

func (q *SingleTenantQuerier) isWithinIngesterMaxLookbackPeriod(maxLookback time.Duration, queryEnd time.Time) bool {
	// if no lookback limits are configured, always consider this within the range of the lookback period
	if maxLookback <= 0 {
		return true
	}

	// find the first instance that we would want to query the ingester from...
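	// (for example, with maxLookback = 3h only queries whose end time falls within
	// the last three hours will reach the ingesters at all)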
	ingesterOldestStartTime := time.Now().Add(-maxLookback)

	// ...and if the query range ends before that, don't query the ingester
	return queryEnd.After(ingesterOldestStartTime)
}

func (q *SingleTenantQuerier) calculateIngesterMaxLookbackPeriod() time.Duration {
	mlb := time.Duration(-1)
	if q.cfg.IngesterQueryStoreMaxLookback != 0 {
		// IngesterQueryStoreMaxLookback takes the precedence over QueryIngestersWithin while also limiting the store query range.
		mlb = q.cfg.IngesterQueryStoreMaxLookback
	} else if q.cfg.QueryIngestersWithin != 0 {
		mlb = q.cfg.QueryIngestersWithin
	}

	return mlb
}

func (q *SingleTenantQuerier) buildQueryIntervals(queryStart, queryEnd time.Time) (*interval, *interval) {
	// limitQueryInterval is a flag for whether store queries should be limited to start time of ingester queries.
	limitQueryInterval := false
	// ingesterMLB having -1 means query ingester for whole duration.
	if q.cfg.IngesterQueryStoreMaxLookback != 0 {
		// IngesterQueryStoreMaxLookback takes the precedence over QueryIngestersWithin while also limiting the store query range.
		limitQueryInterval = true
	}

	ingesterMLB := q.calculateIngesterMaxLookbackPeriod()

	// query ingester for whole duration.
	if ingesterMLB == -1 {
		i := &interval{
			start: queryStart,
			end:   queryEnd,
		}

		if limitQueryInterval {
			// query only ingesters.
			return i, nil
		}

		// query both stores and ingesters without limiting the query interval.
		return i, i
	}

	ingesterQueryWithinRange := q.isWithinIngesterMaxLookbackPeriod(ingesterMLB, queryEnd)

	// see if there is an overlap between ingester query interval and actual query interval, if not just do the store query.
	if !ingesterQueryWithinRange {
		return nil, &interval{
			start: queryStart,
			end:   queryEnd,
		}
	}

	ingesterOldestStartTime := time.Now().Add(-ingesterMLB)

	// if there is an overlap and we are not limiting the query interval then do both store and ingester query for whole query interval.
	if !limitQueryInterval {
		i := &interval{
			start: queryStart,
			end:   queryEnd,
		}
		return i, i
	}

	// since we are limiting the query interval, check if the query touches just the ingesters, if yes then query just the ingesters.
	if ingesterOldestStartTime.Before(queryStart) {
		return &interval{
			start: queryStart,
			end:   queryEnd,
		}, nil
	}

	// limit the start of ingester query interval to ingesterOldestStartTime.
	ingesterQueryInterval := &interval{
		start: ingesterOldestStartTime,
		end:   queryEnd,
	}

	// limit the end of store query interval to ingesterOldestStartTime.
	storeQueryInterval := &interval{
		start: queryStart,
		end:   ingesterOldestStartTime,
	}

	// query touches only ingester query interval so do not do store query.
	if storeQueryInterval.start.After(storeQueryInterval.end) {
		storeQueryInterval = nil
	}

	return ingesterQueryInterval, storeQueryInterval
}

// Label does the heavy lifting for a Label query.
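// Label values (or names) are fetched from the ingesters and the store in
// parallel, each restricted to its own query interval, and merged into a
// single response.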
func (q *SingleTenantQuerier) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) {
	userID, err := tenant.TenantID(ctx)
	if err != nil {
		return nil, err
	}

	if *req.Start, *req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, *req.Start, *req.End); err != nil {
		return nil, err
	}

	// Enforce the query timeout while querying backends
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(q.cfg.QueryTimeout))
	defer cancel()

	g, ctx := errgroup.WithContext(ctx)

	ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(*req.Start, *req.End)

	var ingesterValues [][]string
	if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil {
		g.Go(func() error {
			var err error
			timeFramedReq := *req
			timeFramedReq.Start = &ingesterQueryInterval.start
			timeFramedReq.End = &ingesterQueryInterval.end

			ingesterValues, err = q.ingesterQuerier.Label(ctx, &timeFramedReq)
			return err
		})
	}

	var storeValues []string
	if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil {
		g.Go(func() error {
			var (
				err     error
				from    = model.TimeFromUnixNano(storeQueryInterval.start.UnixNano())
				through = model.TimeFromUnixNano(storeQueryInterval.end.UnixNano())
			)

			if req.Values {
				storeValues, err = q.store.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name)
			} else {
				storeValues, err = q.store.LabelNamesForMetricName(ctx, userID, from, through, "logs")
			}
			return err
		})
	}

	if err := g.Wait(); err != nil {
		return nil, err
	}

	results := append(ingesterValues, storeValues)
	return &logproto.LabelResponse{
		Values: listutil.MergeStringLists(results...),
	}, nil
}

// Check implements the grpc healthcheck
func (*SingleTenantQuerier) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
	return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil
}

// Tail keeps getting matching logs from all ingesters for given query
func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, error) {
	err := q.checkTailRequestLimit(ctx)
	if err != nil {
		return nil, err
	}

	deletes, err := q.deletesForUser(ctx, req.Start, time.Now())
	if err != nil {
		level.Error(spanlogger.FromContext(ctx)).Log("msg", "failed loading deletes for user", "err", err)
	}

	histReq := logql.SelectLogParams{
		QueryRequest: &logproto.QueryRequest{
			Selector:  req.Query,
			Start:     req.Start,
			End:       time.Now(),
			Limit:     req.Limit,
			Direction: logproto.BACKWARD,
			Deletes:   deletes,
		},
	}

	histReq.Start, histReq.End, err = q.validateQueryRequest(ctx, histReq)
	if err != nil {
		return nil, err
	}

	// Enforce the query timeout except when tailing, otherwise the tailing
	// will be terminated once the query timeout is reached
	tailCtx := ctx
	queryCtx, cancelQuery := context.WithDeadline(ctx, time.Now().Add(q.cfg.QueryTimeout))
	defer cancelQuery()

	tailClients, err := q.ingesterQuerier.Tail(tailCtx, req)
	if err != nil {
		return nil, err
	}

	histIterators, err := q.SelectLogs(queryCtx, histReq)
	if err != nil {
		return nil, err
	}

	reversedIterator, err := iter.NewReversedIter(histIterators, req.Limit, true)
	if err != nil {
		return nil, err
	}

	return newTailer(
		time.Duration(req.DelayFor)*time.Second,
		tailClients,
		reversedIterator,
		func(connectedIngestersAddr []string) (map[string]logproto.Querier_TailClient, error) {
			return q.ingesterQuerier.TailDisconnectedIngesters(tailCtx, req, connectedIngestersAddr)
		},
		q.cfg.TailMaxDuration,
		tailerWaitEntryThrottle,
		q.metrics,
	), nil
}

// Series fetches any matching series for a list of matcher sets
func (q *SingleTenantQuerier) Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) {
	userID, err := tenant.TenantID(ctx)
	if err != nil {
		return nil, err
	}

	if req.Start, req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, req.Start, req.End); err != nil {
		return nil, err
	}

	// Enforce the query timeout while querying backends
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(q.cfg.QueryTimeout))
	defer cancel()

	return q.awaitSeries(ctx, req)
}

func (q *SingleTenantQuerier) awaitSeries(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) {
	// buffer the channels to the # of calls they're expecting
	series := make(chan [][]logproto.SeriesIdentifier, 2)
	errs := make(chan error, 2)

	ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(req.Start, req.End)

	// fetch series from ingesters and store concurrently
	if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil {
		timeFramedReq := *req
		timeFramedReq.Start = ingesterQueryInterval.start
		timeFramedReq.End = ingesterQueryInterval.end

		go func() {
			// fetch series identifiers from ingesters
			resps, err := q.ingesterQuerier.Series(ctx, &timeFramedReq)
			if err != nil {
				errs <- err
				return
			}

			series <- resps
		}()
	} else {
		// If only querying the store or the query range does not overlap with the ingester max lookback period (defined by `query_ingesters_within`)
		// then don't call out to the ingesters, and send an empty result back to the channel
		series <- [][]logproto.SeriesIdentifier{}
	}

	if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil {
		go func() {
			storeValues, err := q.seriesForMatchers(ctx, storeQueryInterval.start, storeQueryInterval.end, req.GetGroups(), req.Shards)
			if err != nil {
				errs <- err
				return
			}
			series <- [][]logproto.SeriesIdentifier{storeValues}
		}()
	} else {
		// If we are not querying the store, send an empty result back to the channel
		series <- [][]logproto.SeriesIdentifier{}
	}

	var sets [][]logproto.SeriesIdentifier
	for i := 0; i < 2; i++ {
		select {
		case err := <-errs:
			return nil, err
		case s := <-series:
			sets = append(sets, s...)
		}
	}

	deduped := make(map[string]logproto.SeriesIdentifier)
	for _, set := range sets {
		for _, s := range set {
			key := loghttp.LabelSet(s.Labels).String()
			if _, exists := deduped[key]; !exists {
				deduped[key] = s
			}
		}
	}

	response := &logproto.SeriesResponse{
		Series: make([]logproto.SeriesIdentifier, 0, len(deduped)),
	}

	for _, s := range deduped {
		response.Series = append(response.Series, s)
	}

	return response, nil
}

// seriesForMatchers fetches series from the store for each matcher set
// TODO: make efficient if/when the index supports labels so we don't have to read chunks
func (q *SingleTenantQuerier) seriesForMatchers(
	ctx context.Context,
	from, through time.Time,
	groups []string,
	shards []string,
) ([]logproto.SeriesIdentifier, error) {
	var results []logproto.SeriesIdentifier
	// If no matchers were specified for the series query,
	// we send a query with an empty matcher which will match every series.
	if len(groups) == 0 {
		var err error
		results, err = q.seriesForMatcher(ctx, from, through, "", shards)
		if err != nil {
			return nil, err
		}
	} else {
		for _, group := range groups {
			ids, err := q.seriesForMatcher(ctx, from, through, group, shards)
			if err != nil {
				return nil, err
			}
			results = append(results, ids...)
		}
	}
	return results, nil
}

// seriesForMatcher fetches series from the store for a given matcher
func (q *SingleTenantQuerier) seriesForMatcher(ctx context.Context, from, through time.Time, matcher string, shards []string) ([]logproto.SeriesIdentifier, error) {
	ids, err := q.store.Series(ctx, logql.SelectLogParams{
		QueryRequest: &logproto.QueryRequest{
			Selector:  matcher,
			Limit:     1,
			Start:     from,
			End:       through,
			Direction: logproto.FORWARD,
			Shards:    shards,
		},
	})
	if err != nil {
		return nil, err
	}
	return ids, nil
}

func (q *SingleTenantQuerier) validateQueryRequest(ctx context.Context, req logql.QueryParams) (time.Time, time.Time, error) {
	userID, err := tenant.TenantID(ctx)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}

	selector, err := req.LogSelector()
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	matchers := selector.Matchers()

	maxStreamMatchersPerQuery := q.limits.MaxStreamsMatchersPerQuery(userID)
	if len(matchers) > maxStreamMatchersPerQuery {
		return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest,
			"max streams matchers per query exceeded, matchers-count > limit (%d > %d)", len(matchers), maxStreamMatchersPerQuery)
	}

	return validateQueryTimeRangeLimits(ctx, userID, q.limits, req.GetStart(), req.GetEnd())
}

type timeRangeLimits interface {
	MaxQueryLookback(string) time.Duration
	MaxQueryLength(string) time.Duration
}

func validateQueryTimeRangeLimits(ctx context.Context, userID string, limits timeRangeLimits, from, through time.Time) (time.Time, time.Time, error) {
	now := nowFunc()
	// Clamp the time range based on the max query lookback.
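	// e.g. with a 30-day lookback, a query starting 40 days ago gets its start
	// moved forward to now-30d; the end time is left untouched.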
	if maxQueryLookback := limits.MaxQueryLookback(userID); maxQueryLookback > 0 && from.Before(now.Add(-maxQueryLookback)) {
		origStartTime := from
		from = now.Add(-maxQueryLookback)

		level.Debug(spanlogger.FromContext(ctx)).Log(
			"msg", "the start time of the query has been manipulated because of the 'max query lookback' setting",
			"original", origStartTime,
			"updated", from)

	}
	if maxQueryLength := limits.MaxQueryLength(userID); maxQueryLength > 0 && (through).Sub(from) > maxQueryLength {
		return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest, util_validation.ErrQueryTooLong, (through).Sub(from), maxQueryLength)
	}
	if through.Before(from) {
		return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, through < from (%s < %s)", through, from)
	}
	return from, through, nil
}

func (q *SingleTenantQuerier) checkTailRequestLimit(ctx context.Context) error {
	userID, err := tenant.TenantID(ctx)
	if err != nil {
		return err
	}

	responses, err := q.ingesterQuerier.TailersCount(ctx)
	// We are only checking active ingesters, and any error returned stops checking other ingesters
	// so return that error here as well.
	if err != nil {
		return err
	}

	var maxCnt uint32
	maxCnt = 0
	for _, resp := range responses {
		if resp > maxCnt {
			maxCnt = resp
		}
	}
	l := uint32(q.limits.MaxConcurrentTailRequests(userID))
	if maxCnt >= l {
		return httpgrpc.Errorf(http.StatusBadRequest,
			"max concurrent tail requests limit exceeded, count > limit (%d > %d)", maxCnt+1, l)
	}

	return nil
}

func (q *SingleTenantQuerier) IndexStats(ctx context.Context, req *loghttp.RangeQuery) (*stats.Stats, error) {
	userID, err := tenant.TenantID(ctx)
	if err != nil {
		return nil, err
	}

	start, end, err := validateQueryTimeRangeLimits(ctx, userID, q.limits, req.Start, req.End)
	if err != nil {
		return nil, err
	}

	matchers, err := syntax.ParseMatchers(req.Query)
	if err != nil {
		return nil, err
	}

	// Enforce the query timeout while querying backends
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(q.cfg.QueryTimeout))
	defer cancel()

	return q.store.Stats(
		ctx,
		userID,
		model.TimeFromUnixNano(start.UnixNano()),
		model.TimeFromUnixNano(end.UnixNano()),
		matchers...,
	)
}