// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.

package store

import (
	"bytes"
	"context"
	"fmt"
	"hash"
	"io"
	"net/http"
	"net/url"
	"path"
	"sort"
	"strings"
	"sync"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/timestamp"

	"github.com/blang/semver/v4"
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage/remote"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/thanos-io/thanos/pkg/component"
	"github.com/thanos-io/thanos/pkg/dedup"
	"github.com/thanos-io/thanos/pkg/httpconfig"
	"github.com/thanos-io/thanos/pkg/info/infopb"
	"github.com/thanos-io/thanos/pkg/promclient"
	"github.com/thanos-io/thanos/pkg/runutil"
	"github.com/thanos-io/thanos/pkg/store/labelpb"
	"github.com/thanos-io/thanos/pkg/store/storepb"
	"github.com/thanos-io/thanos/pkg/store/storepb/prompb"
	"github.com/thanos-io/thanos/pkg/tracing"
)

// PrometheusStore implements the store node API on top of the Prometheus remote read API.
type PrometheusStore struct {
	logger           log.Logger
	base             *url.URL           // Base URL of the Prometheus instance.
	client           *promclient.Client // HTTP client used for all Prometheus API calls.
	buffers          sync.Pool          // Pool of *[]byte scratch buffers for (de)serialization.
	component        component.StoreAPI
	externalLabelsFn func() labels.Labels // Returns the current (sorted) external label set.

	promVersion func() string
	timestamps  func() (mint int64, maxt int64)

	// Response types this store advertises in remote read requests, in preference order.
	remoteReadAcceptableResponses []prompb.ReadRequest_ResponseType

	// framesRead observes the number of frames received per streamed remote read response.
	framesRead prometheus.Histogram
}

// Label{Values,Names} call with matchers is supported for Prometheus versions >= 2.24.0.
// https://github.com/prometheus/prometheus/commit/caa173d2aac4c390546b1f78302104b1ccae0878.
var baseVer, _ = semver.Make("2.24.0")

const initialBufSize = 32 * 1024 // 32KB seems like a good minimum starting size for sync pool size.

// NewPrometheusStore returns a new PrometheusStore that uses the given HTTP client
// to talk to Prometheus.
// It attaches the provided external labels to all results. Provided external labels has to be sorted.
func NewPrometheusStore(
	logger log.Logger,
	reg prometheus.Registerer,
	client *promclient.Client,
	baseURL *url.URL,
	component component.StoreAPI,
	externalLabelsFn func() labels.Labels,
	timestamps func() (mint int64, maxt int64),
	promVersion func() string,
) (*PrometheusStore, error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	p := &PrometheusStore{
		logger:           logger,
		base:             baseURL,
		client:           client,
		component:        component,
		externalLabelsFn: externalLabelsFn,
		promVersion:      promVersion,
		timestamps:       timestamps,
		// Prefer the streamed chunked encoding; SAMPLES is kept as a fallback for old Prometheus.
		remoteReadAcceptableResponses: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS, prompb.ReadRequest_SAMPLES},
		buffers: sync.Pool{New: func() interface{} {
			b := make([]byte, 0, initialBufSize)
			return &b
		}},
		framesRead: promauto.With(reg).NewHistogram(
			prometheus.HistogramOpts{
				Name:    "prometheus_store_received_frames",
				Help:    "Number of frames received per streamed response.",
				Buckets: prometheus.ExponentialBuckets(10, 10, 5),
			},
		),
	}
	return p, nil
}

// Info returns store information about the Prometheus instance.
// NOTE(bwplotka): MaxTime & MinTime are not accurate nor adjusted dynamically.
// This is fine for now, but might be needed in future.
114 func (p *PrometheusStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { 115 lset := p.externalLabelsFn() 116 mint, maxt := p.timestamps() 117 118 res := &storepb.InfoResponse{ 119 Labels: make([]labelpb.ZLabel, 0, len(lset)), 120 StoreType: p.component.ToProto(), 121 MinTime: mint, 122 MaxTime: maxt, 123 } 124 res.Labels = append(res.Labels, labelpb.ZLabelsFromPromLabels(lset)...) 125 126 // Until we deprecate the single labels in the reply, we just duplicate 127 // them here for migration/compatibility purposes. 128 res.LabelSets = []labelpb.ZLabelSet{} 129 if len(res.Labels) > 0 { 130 res.LabelSets = append(res.LabelSets, labelpb.ZLabelSet{ 131 Labels: res.Labels, 132 }) 133 } 134 return res, nil 135 } 136 137 func (p *PrometheusStore) getBuffer() *[]byte { 138 b := p.buffers.Get() 139 return b.(*[]byte) 140 } 141 142 func (p *PrometheusStore) putBuffer(b *[]byte) { 143 p.buffers.Put(b) 144 } 145 146 // Series returns all series for a requested time range and label matcher. 147 func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Store_SeriesServer) error { 148 s := newFlushableServer(seriesSrv, sortingStrategyStore) 149 150 extLset := p.externalLabelsFn() 151 152 match, matchers, err := matchesExternalLabels(r.Matchers, extLset) 153 if err != nil { 154 return status.Error(codes.InvalidArgument, err.Error()) 155 } 156 if !match { 157 return nil 158 } 159 if len(matchers) == 0 { 160 return status.Error(codes.InvalidArgument, "no matchers specified (excluding external labels)") 161 } 162 163 // Don't ask for more than available time. This includes potential `minTime` flag limit. 164 availableMinTime, _ := p.timestamps() 165 if r.MinTime < availableMinTime { 166 // If pushdown is enabled then align min time with the step to avoid missing data 167 // when it gets retrieved by the upper layer's PromQL engine. 
168 // This also is necessary when Sidecar uploads a block and then availableMinTime 169 // becomes a fixed timestamp. 170 if r.QueryHints != nil && r.QueryHints.StepMillis != 0 { 171 diff := availableMinTime - r.MinTime 172 r.MinTime += (diff / r.QueryHints.StepMillis) * r.QueryHints.StepMillis 173 // Add one more to strictly fit within --min-time -> infinity. 174 if r.MinTime != availableMinTime { 175 r.MinTime += r.QueryHints.StepMillis 176 } 177 } else { 178 r.MinTime = availableMinTime 179 } 180 } 181 182 extLsetToRemove := map[string]struct{}{} 183 for _, lbl := range r.WithoutReplicaLabels { 184 extLsetToRemove[lbl] = struct{}{} 185 } 186 187 if r.SkipChunks { 188 finalExtLset := rmLabels(extLset.Copy(), extLsetToRemove) 189 labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime) 190 if err != nil { 191 return err 192 } 193 for _, lbm := range labelMaps { 194 lset := make([]labelpb.ZLabel, 0, len(lbm)+len(finalExtLset)) 195 for k, v := range lbm { 196 lset = append(lset, labelpb.ZLabel{Name: k, Value: v}) 197 } 198 lset = append(lset, labelpb.ZLabelsFromPromLabels(finalExtLset)...) 
199 sort.Slice(lset, func(i, j int) bool { 200 return lset[i].Name < lset[j].Name 201 }) 202 if err = s.Send(storepb.NewSeriesResponse(&storepb.Series{Labels: lset})); err != nil { 203 return err 204 } 205 } 206 return s.Flush() 207 } 208 209 shardMatcher := r.ShardInfo.Matcher(&p.buffers) 210 defer shardMatcher.Close() 211 212 if r.QueryHints != nil && r.QueryHints.IsSafeToExecute() && !shardMatcher.IsSharded() { 213 return p.queryPrometheus(s, r, extLsetToRemove) 214 } 215 216 q := &prompb.Query{StartTimestampMs: r.MinTime, EndTimestampMs: r.MaxTime} 217 for _, m := range matchers { 218 pm := &prompb.LabelMatcher{Name: m.Name, Value: m.Value} 219 220 switch m.Type { 221 case labels.MatchEqual: 222 pm.Type = prompb.LabelMatcher_EQ 223 case labels.MatchNotEqual: 224 pm.Type = prompb.LabelMatcher_NEQ 225 case labels.MatchRegexp: 226 pm.Type = prompb.LabelMatcher_RE 227 case labels.MatchNotRegexp: 228 pm.Type = prompb.LabelMatcher_NRE 229 default: 230 return errors.New("unrecognized matcher type") 231 } 232 q.Matchers = append(q.Matchers, pm) 233 } 234 235 queryPrometheusSpan, ctx := tracing.StartSpan(s.Context(), "query_prometheus") 236 queryPrometheusSpan.SetTag("query.request", q.String()) 237 238 httpResp, err := p.startPromRemoteRead(ctx, q) 239 if err != nil { 240 queryPrometheusSpan.Finish() 241 return errors.Wrap(err, "query Prometheus") 242 } 243 244 // Negotiate content. We requested streamed chunked response type, but still we need to support old versions of 245 // remote read. 
246 contentType := httpResp.Header.Get("Content-Type") 247 if strings.HasPrefix(contentType, "application/x-protobuf") { 248 return p.handleSampledPrometheusResponse(s, httpResp, queryPrometheusSpan, extLset, enableChunkHashCalculation, extLsetToRemove) 249 } 250 251 if !strings.HasPrefix(contentType, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") { 252 return errors.Errorf("not supported remote read content type: %s", contentType) 253 } 254 return p.handleStreamedPrometheusResponse(s, shardMatcher, httpResp, queryPrometheusSpan, extLset, enableChunkHashCalculation, extLsetToRemove) 255 } 256 257 func (p *PrometheusStore) queryPrometheus( 258 s flushableServer, 259 r *storepb.SeriesRequest, 260 extLsetToRemove map[string]struct{}, 261 ) error { 262 var matrix model.Matrix 263 264 opts := promclient.QueryOptions{} 265 step := r.QueryHints.StepMillis / 1000 266 if step != 0 { 267 result, _, _, err := p.client.QueryRange(s.Context(), p.base, r.ToPromQL(), r.MinTime, r.MaxTime, step, opts) 268 if err != nil { 269 return err 270 } 271 matrix = result 272 } else { 273 vector, _, _, err := p.client.QueryInstant(s.Context(), p.base, r.ToPromQL(), timestamp.Time(r.MaxTime), opts) 274 if err != nil { 275 return err 276 } 277 278 matrix = make(model.Matrix, 0, len(vector)) 279 for _, sample := range vector { 280 matrix = append(matrix, &model.SampleStream{ 281 Metric: sample.Metric, 282 Values: []model.SamplePair{ 283 { 284 Timestamp: sample.Timestamp, 285 Value: sample.Value, 286 }, 287 }, 288 }) 289 } 290 } 291 292 externalLbls := rmLabels(p.externalLabelsFn().Copy(), extLsetToRemove) 293 for _, vector := range matrix { 294 seriesLbls := labels.Labels(make([]labels.Label, 0, len(vector.Metric))) 295 296 // Attach labels from samples. 297 for k, v := range vector.Metric { 298 seriesLbls = append(seriesLbls, labels.FromStrings(string(k), string(v))...) 
299 } 300 sort.Slice(seriesLbls, func(i, j int) bool { 301 return seriesLbls.Less(i, j) 302 }) 303 // Attach external labels for compatibility with remote read. 304 finalLbls := labelpb.ExtendSortedLabels(seriesLbls, externalLbls) 305 finalLbls = append(finalLbls, dedup.PushdownMarker) 306 307 series := &prompb.TimeSeries{ 308 Labels: labelpb.ZLabelsFromPromLabels(finalLbls), 309 Samples: prompb.SamplesFromSamplePairs(vector.Values), 310 } 311 312 chks, err := p.chunkSamples(series, MaxSamplesPerChunk, enableChunkHashCalculation) 313 if err != nil { 314 return err 315 } 316 317 if err := s.Send(storepb.NewSeriesResponse(&storepb.Series{ 318 Labels: series.Labels, 319 Chunks: chks, 320 })); err != nil { 321 return err 322 } 323 } 324 325 return s.Flush() 326 } 327 328 func (p *PrometheusStore) handleSampledPrometheusResponse( 329 s flushableServer, 330 httpResp *http.Response, 331 querySpan tracing.Span, 332 extLset labels.Labels, 333 calculateChecksums bool, 334 extLsetToRemove map[string]struct{}, 335 ) error { 336 level.Debug(p.logger).Log("msg", "started handling ReadRequest_SAMPLED response type.") 337 338 resp, err := p.fetchSampledResponse(s.Context(), httpResp) 339 querySpan.Finish() 340 if err != nil { 341 return err 342 } 343 344 span, _ := tracing.StartSpan(s.Context(), "transform_and_respond") 345 defer span.Finish() 346 span.SetTag("series_count", len(resp.Results[0].Timeseries)) 347 348 for _, e := range resp.Results[0].Timeseries { 349 // https://github.com/prometheus/prometheus/blob/3f6f5d3357e232abe53f1775f893fdf8f842712c/storage/remote/read_handler.go#L166 350 // MergeLabels() prefers local labels over external labels but we prefer 351 // external labels hence we need to do this: 352 lset := rmLabels(labelpb.ExtendSortedLabels(labelpb.ZLabelsToPromLabels(e.Labels), extLset), extLsetToRemove) 353 if len(e.Samples) == 0 { 354 // As found in https://github.com/thanos-io/thanos/issues/381 355 // Prometheus can give us completely empty time series. 
Ignore these with log until we figure out that 356 // this is expected from Prometheus perspective. 357 level.Warn(p.logger).Log( 358 "msg", 359 "found timeseries without any chunk. See https://github.com/thanos-io/thanos/issues/381 for details", 360 "lset", 361 fmt.Sprintf("%v", lset), 362 ) 363 continue 364 } 365 366 aggregatedChunks, err := p.chunkSamples(e, MaxSamplesPerChunk, calculateChecksums) 367 if err != nil { 368 return err 369 } 370 371 if err := s.Send(storepb.NewSeriesResponse(&storepb.Series{ 372 Labels: labelpb.ZLabelsFromPromLabels(lset), 373 Chunks: aggregatedChunks, 374 })); err != nil { 375 return err 376 } 377 } 378 level.Debug(p.logger).Log("msg", "handled ReadRequest_SAMPLED request.", "series", len(resp.Results[0].Timeseries)) 379 return s.Flush() 380 } 381 382 func (p *PrometheusStore) handleStreamedPrometheusResponse( 383 s flushableServer, 384 shardMatcher *storepb.ShardMatcher, 385 httpResp *http.Response, 386 querySpan tracing.Span, 387 extLset labels.Labels, 388 calculateChecksums bool, 389 extLsetToRemove map[string]struct{}, 390 ) error { 391 level.Debug(p.logger).Log("msg", "started handling ReadRequest_STREAMED_XOR_CHUNKS streamed read response.") 392 393 framesNum := 0 394 395 defer func() { 396 p.framesRead.Observe(float64(framesNum)) 397 querySpan.SetTag("frames", framesNum) 398 querySpan.Finish() 399 }() 400 defer runutil.CloseWithLogOnErr(p.logger, httpResp.Body, "prom series request body") 401 402 var data = p.getBuffer() 403 defer p.putBuffer(data) 404 405 bodySizer := NewBytesRead(httpResp.Body) 406 seriesStats := &storepb.SeriesStatsCounter{} 407 408 // TODO(bwplotka): Put read limit as a flag. 
409 stream := remote.NewChunkedReader(bodySizer, remote.DefaultChunkedReadLimit, *data) 410 hasher := hashPool.Get().(hash.Hash64) 411 defer hashPool.Put(hasher) 412 for { 413 res := &prompb.ChunkedReadResponse{} 414 err := stream.NextProto(res) 415 if err == io.EOF { 416 break 417 } 418 if err != nil { 419 return errors.Wrap(err, "next proto") 420 } 421 422 if len(res.ChunkedSeries) != 1 { 423 level.Warn(p.logger).Log("msg", "Prometheus ReadRequest_STREAMED_XOR_CHUNKS returned non 1 series in frame", "series", len(res.ChunkedSeries)) 424 } 425 426 framesNum++ 427 for _, series := range res.ChunkedSeries { 428 // MergeLabels() prefers local labels over external labels but we prefer 429 // external labels hence we need to do this: 430 // https://github.com/prometheus/prometheus/blob/3f6f5d3357e232abe53f1775f893fdf8f842712c/storage/remote/codec.go#L210. 431 completeLabelset := rmLabels(labelpb.ExtendSortedLabels(labelpb.ZLabelsToPromLabels(series.Labels), extLset), extLsetToRemove) 432 if !shardMatcher.MatchesLabels(completeLabelset) { 433 continue 434 } 435 436 seriesStats.CountSeries(series.Labels) 437 thanosChks := make([]storepb.AggrChunk, len(series.Chunks)) 438 439 for i, chk := range series.Chunks { 440 chkHash := hashChunk(hasher, chk.Data, calculateChecksums) 441 thanosChks[i] = storepb.AggrChunk{ 442 MaxTime: chk.MaxTimeMs, 443 MinTime: chk.MinTimeMs, 444 Raw: &storepb.Chunk{ 445 Data: chk.Data, 446 // Prometheus ChunkEncoding vs ours https://github.com/thanos-io/thanos/blob/master/pkg/store/storepb/types.proto#L19 447 // has one difference. Prometheus has Chunk_UNKNOWN Chunk_Encoding = 0 vs we start from 448 // XOR as 0. Compensate for that here: 449 Type: storepb.Chunk_Encoding(chk.Type - 1), 450 Hash: chkHash, 451 }, 452 } 453 seriesStats.Samples += thanosChks[i].Raw.XORNumSamples() 454 seriesStats.Chunks++ 455 456 // Drop the reference to data from non protobuf for GC. 
457 series.Chunks[i].Data = nil 458 } 459 460 r := storepb.NewSeriesResponse(&storepb.Series{ 461 Labels: labelpb.ZLabelsFromPromLabels(completeLabelset), 462 Chunks: thanosChks, 463 }) 464 if err := s.Send(r); err != nil { 465 return err 466 } 467 } 468 } 469 470 querySpan.SetTag("processed.series", seriesStats.Series) 471 querySpan.SetTag("processed.chunks", seriesStats.Chunks) 472 querySpan.SetTag("processed.samples", seriesStats.Samples) 473 querySpan.SetTag("processed.bytes", bodySizer.BytesCount()) 474 level.Debug(p.logger).Log("msg", "handled ReadRequest_STREAMED_XOR_CHUNKS request.", "frames", framesNum) 475 476 return s.Flush() 477 } 478 479 type BytesCounter struct { 480 io.ReadCloser 481 bytesCount int 482 } 483 484 func NewBytesRead(rc io.ReadCloser) *BytesCounter { 485 return &BytesCounter{ReadCloser: rc} 486 } 487 488 func (s *BytesCounter) Read(p []byte) (n int, err error) { 489 n, err = s.ReadCloser.Read(p) 490 s.bytesCount += n 491 return n, err 492 } 493 494 func (s *BytesCounter) BytesCount() int { 495 return s.bytesCount 496 } 497 498 func (p *PrometheusStore) fetchSampledResponse(ctx context.Context, resp *http.Response) (_ *prompb.ReadResponse, err error) { 499 defer runutil.ExhaustCloseWithLogOnErr(p.logger, resp.Body, "prom series request body") 500 501 b := p.getBuffer() 502 buf := bytes.NewBuffer(*b) 503 defer p.putBuffer(b) 504 if _, err := io.Copy(buf, resp.Body); err != nil { 505 return nil, errors.Wrap(err, "copy response") 506 } 507 508 sb := p.getBuffer() 509 var decomp []byte 510 tracing.DoInSpan(ctx, "decompress_response", func(ctx context.Context) { 511 decomp, err = snappy.Decode(*sb, buf.Bytes()) 512 }) 513 defer p.putBuffer(sb) 514 if err != nil { 515 return nil, errors.Wrap(err, "decompress response") 516 } 517 518 var data prompb.ReadResponse 519 tracing.DoInSpan(ctx, "unmarshal_response", func(ctx context.Context) { 520 err = proto.Unmarshal(decomp, &data) 521 }) 522 if err != nil { 523 return nil, errors.Wrap(err, 
"unmarshal response") 524 } 525 if len(data.Results) != 1 { 526 return nil, errors.Errorf("unexpected result size %d", len(data.Results)) 527 } 528 529 return &data, nil 530 } 531 532 func (p *PrometheusStore) chunkSamples(series *prompb.TimeSeries, maxSamplesPerChunk int, calculateChecksums bool) (chks []storepb.AggrChunk, err error) { 533 samples := series.Samples 534 hasher := hashPool.Get().(hash.Hash64) 535 defer hashPool.Put(hasher) 536 537 for len(samples) > 0 { 538 chunkSize := len(samples) 539 if chunkSize > maxSamplesPerChunk { 540 chunkSize = maxSamplesPerChunk 541 } 542 543 enc, cb, err := p.encodeChunk(samples[:chunkSize]) 544 if err != nil { 545 return nil, status.Error(codes.Unknown, err.Error()) 546 } 547 548 chkHash := hashChunk(hasher, cb, calculateChecksums) 549 chks = append(chks, storepb.AggrChunk{ 550 MinTime: samples[0].Timestamp, 551 MaxTime: samples[chunkSize-1].Timestamp, 552 Raw: &storepb.Chunk{Type: enc, Data: cb, Hash: chkHash}, 553 }) 554 555 samples = samples[chunkSize:] 556 } 557 558 return chks, nil 559 } 560 561 func (p *PrometheusStore) startPromRemoteRead(ctx context.Context, q *prompb.Query) (presp *http.Response, err error) { 562 reqb, err := proto.Marshal(&prompb.ReadRequest{ 563 Queries: []*prompb.Query{q}, 564 AcceptedResponseTypes: p.remoteReadAcceptableResponses, 565 }) 566 if err != nil { 567 return nil, errors.Wrap(err, "marshal read request") 568 } 569 570 u := *p.base 571 u.Path = path.Join(u.Path, "api/v1/read") 572 573 preq, err := http.NewRequest("POST", u.String(), bytes.NewReader(snappy.Encode(nil, reqb))) 574 if err != nil { 575 return nil, errors.Wrap(err, "unable to create request") 576 } 577 preq.Header.Add("Content-Encoding", "snappy") 578 preq.Header.Set("Content-Type", "application/x-stream-protobuf") 579 preq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") 580 581 preq.Header.Set("User-Agent", httpconfig.ThanosUserAgent) 582 presp, err = p.client.Do(preq.WithContext(ctx)) 583 if err != nil { 584 
return nil, errors.Wrap(err, "send request") 585 } 586 if presp.StatusCode/100 != 2 { 587 // Best effort read. 588 b, err := io.ReadAll(presp.Body) 589 if err != nil { 590 level.Error(p.logger).Log("msg", "failed to read response from non 2XX remote read request", "err", err) 591 } 592 _ = presp.Body.Close() 593 return nil, errors.Errorf("request failed with code %s; msg %s", presp.Status, string(b)) 594 } 595 596 return presp, nil 597 } 598 599 // matchesExternalLabels returns false if given matchers are not matching external labels. 600 // If true, matchesExternalLabels also returns Prometheus matchers without those matching external labels. 601 func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labels) (bool, []*labels.Matcher, error) { 602 tms, err := storepb.MatchersToPromMatchers(ms...) 603 if err != nil { 604 return false, nil, err 605 } 606 607 if len(externalLabels) == 0 { 608 return true, tms, nil 609 } 610 611 var newMatchers []*labels.Matcher 612 for i, tm := range tms { 613 // Validate all matchers. 614 extValue := externalLabels.Get(tm.Name) 615 if extValue == "" { 616 // Agnostic to external labels. 617 tms = append(tms[:i], tms[i:]...) 618 newMatchers = append(newMatchers, tm) 619 continue 620 } 621 622 if !tm.Matches(extValue) { 623 // External label does not match. This should not happen - it should be filtered out on query node, 624 // but let's do that anyway here. 625 return false, nil, nil 626 } 627 } 628 return true, newMatchers, nil 629 } 630 631 // encodeChunk translates the sample pairs into a chunk. 632 // TODO(kakkoyun): Linter - result 0 (github.com/thanos-io/thanos/pkg/store/storepb.Chunk_Encoding) is always 0. 
633 func (p *PrometheusStore) encodeChunk(ss []prompb.Sample) (storepb.Chunk_Encoding, []byte, error) { //nolint:unparam 634 c := chunkenc.NewXORChunk() 635 636 a, err := c.Appender() 637 if err != nil { 638 return 0, nil, err 639 } 640 for _, s := range ss { 641 a.Append(s.Timestamp, s.Value) 642 } 643 return storepb.Chunk_XOR, c.Bytes(), nil 644 } 645 646 // LabelNames returns all known label names of series that match the given matchers. 647 func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { 648 extLset := p.externalLabelsFn() 649 650 match, matchers, err := matchesExternalLabels(r.Matchers, extLset) 651 if err != nil { 652 return nil, status.Error(codes.InvalidArgument, err.Error()) 653 } 654 if !match { 655 return &storepb.LabelNamesResponse{Names: nil}, nil 656 } 657 658 var lbls []string 659 version, parseErr := semver.Parse(p.promVersion()) 660 if len(matchers) == 0 || (parseErr == nil && version.GTE(baseVer)) { 661 lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End) 662 if err != nil { 663 return nil, err 664 } 665 } else { 666 sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) 667 if err != nil { 668 return nil, err 669 } 670 671 // Using set to handle duplicate values. 672 labelNamesSet := make(map[string]struct{}) 673 for _, s := range sers { 674 for labelName := range s { 675 labelNamesSet[labelName] = struct{}{} 676 } 677 } 678 679 for key := range labelNamesSet { 680 lbls = append(lbls, key) 681 } 682 } 683 684 if len(lbls) > 0 { 685 for _, extLbl := range extLset { 686 lbls = append(lbls, extLbl.Name) 687 } 688 sort.Strings(lbls) 689 } 690 691 return &storepb.LabelNamesResponse{Names: lbls}, nil 692 } 693 694 // LabelValues returns all known label values for a given label name. 
695 func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { 696 if r.Label == "" { 697 return nil, status.Error(codes.InvalidArgument, "label name parameter cannot be empty") 698 } 699 700 extLset := p.externalLabelsFn() 701 702 match, matchers, err := matchesExternalLabels(r.Matchers, extLset) 703 if err != nil { 704 return nil, status.Error(codes.InvalidArgument, err.Error()) 705 } 706 if !match { 707 return &storepb.LabelValuesResponse{Values: nil}, nil 708 } 709 710 // First check for matching external label which has priority. 711 if l := extLset.Get(r.Label); l != "" { 712 return &storepb.LabelValuesResponse{Values: []string{l}}, nil 713 } 714 715 var ( 716 sers []map[string]string 717 vals []string 718 ) 719 720 version, parseErr := semver.Parse(p.promVersion()) 721 if len(matchers) == 0 || (parseErr == nil && version.GTE(baseVer)) { 722 vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End) 723 if err != nil { 724 return nil, err 725 } 726 } else { 727 sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) 728 if err != nil { 729 return nil, err 730 } 731 732 // Using set to handle duplicate values. 733 labelValuesSet := make(map[string]struct{}) 734 for _, s := range sers { 735 if val, exists := s[r.Label]; exists { 736 labelValuesSet[val] = struct{}{} 737 } 738 } 739 for key := range labelValuesSet { 740 vals = append(vals, key) 741 } 742 } 743 744 sort.Strings(vals) 745 return &storepb.LabelValuesResponse{Values: vals}, nil 746 } 747 748 func (p *PrometheusStore) LabelSet() []labelpb.ZLabelSet { 749 lset := p.externalLabelsFn() 750 751 labels := make([]labelpb.ZLabel, 0, len(lset)) 752 labels = append(labels, labelpb.ZLabelsFromPromLabels(lset)...) 
753 754 labelset := []labelpb.ZLabelSet{} 755 if len(labels) > 0 { 756 labelset = append(labelset, labelpb.ZLabelSet{ 757 Labels: labels, 758 }) 759 } 760 761 return labelset 762 } 763 764 func (p *PrometheusStore) TSDBInfos() []infopb.TSDBInfo { 765 labels := p.LabelSet() 766 if len(labels) == 0 { 767 return []infopb.TSDBInfo{} 768 } 769 770 mint, maxt := p.Timestamps() 771 return []infopb.TSDBInfo{ 772 { 773 Labels: labelpb.ZLabelSet{ 774 Labels: labels[0].Labels, 775 }, 776 MinTime: mint, 777 MaxTime: maxt, 778 }, 779 } 780 } 781 782 func (p *PrometheusStore) Timestamps() (mint int64, maxt int64) { 783 return p.timestamps() 784 }