github.com/thanos-io/thanos@v0.32.5/pkg/store/tsdb.go

// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.

package store

import (
	"context"
	"hash"
	"io"
	"math"
	"sort"
	"strings"
	"sync"

	"github.com/go-kit/log"
	"github.com/pkg/errors"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/thanos-io/thanos/pkg/component"
	"github.com/thanos-io/thanos/pkg/info/infopb"
	"github.com/thanos-io/thanos/pkg/runutil"
	"github.com/thanos-io/thanos/pkg/store/labelpb"
	"github.com/thanos-io/thanos/pkg/store/storepb"
)

const RemoteReadFrameLimit = 1048576

type TSDBReader interface {
	storage.ChunkQueryable
	StartTime() (int64, error)
}

// TSDBStore implements the store API against a local TSDB instance.
// It attaches the provided external labels to all results. It only responds with raw data
// and does not support downsampling.
type TSDBStore struct {
	logger           log.Logger
	db               TSDBReader
	component        component.StoreAPI
	buffers          sync.Pool
	maxBytesPerFrame int

	extLset labels.Labels
	mtx     sync.RWMutex
}

func RegisterWritableStoreServer(storeSrv storepb.WriteableStoreServer) func(*grpc.Server) {
	return func(s *grpc.Server) {
		storepb.RegisterWriteableStoreServer(s, storeSrv)
	}
}

// ReadWriteTSDBStore is a TSDBStore that can also be written to.
type ReadWriteTSDBStore struct {
	storepb.StoreServer
	storepb.WriteableStoreServer
}

// NewTSDBStore creates a new TSDBStore.
// NOTE: Given lset has to be sorted.
func NewTSDBStore(logger log.Logger, db TSDBReader, component component.StoreAPI, extLset labels.Labels) *TSDBStore {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	return &TSDBStore{
		logger:           logger,
		db:               db,
		component:        component,
		extLset:          extLset,
		maxBytesPerFrame: RemoteReadFrameLimit,
		buffers: sync.Pool{New: func() interface{} {
			b := make([]byte, 0, initialBufSize)
			return &b
		}},
	}
}

func (s *TSDBStore) SetExtLset(extLset labels.Labels) {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	s.extLset = extLset
}

func (s *TSDBStore) getExtLset() labels.Labels {
	s.mtx.RLock()
	defer s.mtx.RUnlock()

	return s.extLset
}
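
// exampleServeTSDBStore is an illustrative sketch, not part of the upstream file.
// It shows how a TSDBStore is typically exposed over gRPC: construct it against a
// TSDBReader (for example a *tsdb.DB) and register it as the generated storepb
// Store service. component.Receive is used here purely as an example component;
// callers pass whichever component they run as. For a store that also accepts
// writes, combine it with a WriteableStoreServer via ReadWriteTSDBStore and
// RegisterWritableStoreServer.
func exampleServeTSDBStore(logger log.Logger, db TSDBReader, extLset labels.Labels) *grpc.Server {
	store := NewTSDBStore(logger, db, component.Receive, extLset)
	srv := grpc.NewServer()
	// Register the read path (Info/Series/LabelNames/LabelValues).
	storepb.RegisterStoreServer(srv, store)
	return srv
}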

// Info returns store information about the local TSDB instance.
func (s *TSDBStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) {
	minTime, err := s.db.StartTime()
	if err != nil {
		return nil, errors.Wrap(err, "TSDB min Time")
	}

	res := &storepb.InfoResponse{
		Labels:    labelpb.ZLabelsFromPromLabels(s.getExtLset()),
		StoreType: s.component.ToProto(),
		MinTime:   minTime,
		MaxTime:   math.MaxInt64,
	}

	// Until we deprecate the single labels in the reply, we just duplicate
	// them here for migration/compatibility purposes.
	res.LabelSets = []labelpb.ZLabelSet{}
	if len(res.Labels) > 0 {
		res.LabelSets = append(res.LabelSets, labelpb.ZLabelSet{
			Labels: res.Labels,
		})
	}
	return res, nil
}

func (s *TSDBStore) LabelSet() []labelpb.ZLabelSet {
	labels := labelpb.ZLabelsFromPromLabels(s.getExtLset())
	labelSets := []labelpb.ZLabelSet{}
	if len(labels) > 0 {
		labelSets = append(labelSets, labelpb.ZLabelSet{
			Labels: labels,
		})
	}

	return labelSets
}

func (p *TSDBStore) TSDBInfos() []infopb.TSDBInfo {
	labels := p.LabelSet()
	if len(labels) == 0 {
		return []infopb.TSDBInfo{}
	}

	mint, maxt := p.TimeRange()
	return []infopb.TSDBInfo{
		{
			Labels: labelpb.ZLabelSet{
				Labels: labels[0].Labels,
			},
			MinTime: mint,
			MaxTime: maxt,
		},
	}
}

func (s *TSDBStore) TimeRange() (int64, int64) {
	var minTime int64 = math.MinInt64
	startTime, err := s.db.StartTime()
	if err == nil {
		// Since we always use the tsdb.DB implementation,
		// StartTime should never return an error.
		minTime = startTime
	}

	return minTime, math.MaxInt64
}

// CloseDelegator allows delegating the close call (releasing resources used by the request) to the server.
// This is useful when we invoke StoreAPI within another StoreAPI and results are ephemeral until copied.
type CloseDelegator interface {
	Delegate(io.Closer)
}
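
// delegatingSeriesServer is an illustrative sketch, not part of the upstream file.
// It shows the shape of an in-process caller implementing CloseDelegator: instead of
// Series closing the chunk querier itself, it hands the querier to the caller, which
// releases it only after the streamed results have been copied.
type delegatingSeriesServer struct {
	storepb.Store_SeriesServer
	closers []io.Closer
}

// Delegate records a resource whose lifetime is now owned by the caller.
func (d *delegatingSeriesServer) Delegate(c io.Closer) {
	d.closers = append(d.closers, c)
}

// Close releases all delegated resources, returning the first error encountered.
func (d *delegatingSeriesServer) Close() error {
	var firstErr error
	for _, c := range d.closers {
		if err := c.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}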

// Series returns all series for a requested time range and label matcher. The returned data may
// exceed the requested time bounds.
func (s *TSDBStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Store_SeriesServer) error {
	srv := newFlushableServer(seriesSrv, sortingStrategyStore)

	match, matchers, err := matchesExternalLabels(r.Matchers, s.getExtLset())
	if err != nil {
		return status.Error(codes.InvalidArgument, err.Error())
	}

	if !match {
		return nil
	}

	if len(matchers) == 0 {
		return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error())
	}

	q, err := s.db.ChunkQuerier(context.Background(), r.MinTime, r.MaxTime)
	if err != nil {
		return status.Error(codes.Internal, err.Error())
	}

	if cd, ok := srv.(CloseDelegator); ok {
		cd.Delegate(q)
	} else {
		defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb chunk querier series")
	}

	set := q.Select(true, nil, matchers...)

	shardMatcher := r.ShardInfo.Matcher(&s.buffers)
	defer shardMatcher.Close()
	hasher := hashPool.Get().(hash.Hash64)
	defer hashPool.Put(hasher)

	extLsetToRemove := map[string]struct{}{}
	for _, lbl := range r.WithoutReplicaLabels {
		extLsetToRemove[lbl] = struct{}{}
	}
	finalExtLset := rmLabels(s.getExtLset().Copy(), extLsetToRemove)

	// Stream at most one series per frame; series may be split over multiple frames according to maxBytesPerFrame.
	for set.Next() {
		series := set.At()

		completeLabelset := labelpb.ExtendSortedLabels(rmLabels(series.Labels(), extLsetToRemove), finalExtLset)
		if !shardMatcher.MatchesLabels(completeLabelset) {
			continue
		}

		storeSeries := storepb.Series{Labels: labelpb.ZLabelsFromPromLabels(completeLabelset)}
		if r.SkipChunks {
			if err := srv.Send(storepb.NewSeriesResponse(&storeSeries)); err != nil {
				return status.Error(codes.Aborted, err.Error())
			}
			continue
		}

		bytesLeftForChunks := s.maxBytesPerFrame
		for _, lbl := range storeSeries.Labels {
			bytesLeftForChunks -= lbl.Size()
		}
		frameBytesLeft := bytesLeftForChunks

		seriesChunks := []storepb.AggrChunk{}
		chIter := series.Iterator(nil)
		isNext := chIter.Next()
		for isNext {
			chk := chIter.At()
			if chk.Chunk == nil {
				return status.Errorf(codes.Internal, "TSDBStore: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref)
			}

			chunkBytes := make([]byte, len(chk.Chunk.Bytes()))
			copy(chunkBytes, chk.Chunk.Bytes())
			c := storepb.AggrChunk{
				MinTime: chk.MinTime,
				MaxTime: chk.MaxTime,
				Raw: &storepb.Chunk{
					Type: storepb.Chunk_Encoding(chk.Chunk.Encoding() - 1), // Proto chunk encoding is one off from the TSDB one.
					Data: chunkBytes,
					Hash: hashChunk(hasher, chunkBytes, enableChunkHashCalculation),
				},
			}
			frameBytesLeft -= c.Size()
			seriesChunks = append(seriesChunks, c)

			// We are fine with a minor inaccuracy in max bytes per frame; it is off by at most one full chunk size.
			isNext = chIter.Next()
			if frameBytesLeft > 0 && isNext {
				continue
			}
			if err := srv.Send(storepb.NewSeriesResponse(&storepb.Series{Labels: storeSeries.Labels, Chunks: seriesChunks})); err != nil {
				return status.Error(codes.Aborted, err.Error())
			}

			if isNext {
				frameBytesLeft = bytesLeftForChunks
				seriesChunks = make([]storepb.AggrChunk, 0, len(seriesChunks))
			}
		}
		if err := chIter.Err(); err != nil {
			return status.Error(codes.Internal, errors.Wrap(err, "chunk iter").Error())
		}
	}
	if err := set.Err(); err != nil {
		return status.Error(codes.Internal, err.Error())
	}
	for _, w := range set.Warnings() {
		if err := srv.Send(storepb.NewWarnSeriesResponse(w)); err != nil {
			return status.Error(codes.Aborted, err.Error())
		}
	}
	return srv.Flush()
}
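
// estimateSeriesFrames is an illustrative sketch, not part of the upstream file.
// It mirrors the byte-budget logic in Series above: every frame re-pays the size of
// the series labels, chunks are appended until the budget is exhausted, and the
// final chunk of a series always flushes the pending frame. A single oversized
// chunk still ships in its own frame, which is the documented "minor inaccuracy"
// of at most one full chunk.
func estimateSeriesFrames(maxBytesPerFrame, labelBytes int, chunkSizes []int) int {
	frames := 0
	budget := maxBytesPerFrame - labelBytes
	left := budget
	for i, size := range chunkSizes {
		left -= size
		if left > 0 && i < len(chunkSizes)-1 {
			continue // Budget remains and more chunks follow; keep filling this frame.
		}
		frames++
		left = budget
	}
	return frames
}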

// LabelNames returns all known label names constrained with the given matchers.
func (s *TSDBStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (
	*storepb.LabelNamesResponse, error,
) {
	match, matchers, err := matchesExternalLabels(r.Matchers, s.getExtLset())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	if !match {
		return &storepb.LabelNamesResponse{Names: nil}, nil
	}

	q, err := s.db.ChunkQuerier(ctx, r.Start, r.End)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label names")

	res, _, err := q.LabelNames(matchers...)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	if len(res) > 0 {
		for _, lbl := range s.getExtLset() {
			res = append(res, lbl.Name)
		}
		sort.Strings(res)
	}

	// Label names can come from a postings table of a memory-mapped block which can be deleted during
	// head compaction. Since we close the block querier before we return from the function,
	// we need to copy the label names to make sure the client still has access to the data when
	// a block is deleted.
	values := make([]string, len(res))
	for i := range res {
		values[i] = strings.Clone(res[i])
	}

	return &storepb.LabelNamesResponse{Names: values}, nil
}

// LabelValues returns all known label values for a given label name.
func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (
	*storepb.LabelValuesResponse, error,
) {
	if r.Label == "" {
		return nil, status.Error(codes.InvalidArgument, "label name parameter cannot be empty")
	}

	match, matchers, err := matchesExternalLabels(r.Matchers, s.getExtLset())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	if !match {
		return &storepb.LabelValuesResponse{Values: nil}, nil
	}

	if v := s.getExtLset().Get(r.Label); v != "" {
		return &storepb.LabelValuesResponse{Values: []string{v}}, nil
	}

	q, err := s.db.ChunkQuerier(ctx, r.Start, r.End)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label values")

	res, _, err := q.LabelValues(r.Label, matchers...)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	// Label values can come from a postings table of a memory-mapped block which can be deleted during
	// head compaction. Since we close the block querier before we return from the function,
	// we need to copy the label values to make sure the client still has access to the data when
	// a block is deleted.
	values := make([]string, len(res))
	for i := range res {
		values[i] = strings.Clone(res[i])
	}

	return &storepb.LabelValuesResponse{Values: values}, nil
}
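
// exampleLabelValues is an illustrative sketch, not part of the upstream file.
// It shows the request a client would send to this store API. If the requested label
// is one of the configured external labels, LabelValues above answers straight from
// extLset without opening a TSDB querier; otherwise the values are read (and copied)
// from a chunk querier over the requested time range.
func exampleLabelValues(ctx context.Context, conn *grpc.ClientConn, start, end int64) ([]string, error) {
	resp, err := storepb.NewStoreClient(conn).LabelValues(ctx, &storepb.LabelValuesRequest{
		Label: "cluster", // Hypothetical label name, used only for illustration.
		Start: start,
		End:   end,
	})
	if err != nil {
		return nil, err
	}
	return resp.Values, nil
}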