github.com/m3db/m3@v1.5.0/src/query/storage/types.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package storage

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/uber-go/tally"

	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/metrics/policy"
	"github.com/m3db/m3/src/query/block"
	"github.com/m3db/m3/src/query/generated/proto/prompb"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage/m3/consolidators"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
	"github.com/m3db/m3/src/query/ts"
	xtime "github.com/m3db/m3/src/x/time"
)

var errWriteQueryNoDatapoints = errors.New("write query with no datapoints")

// Type describes the type of storage.
type Type int

const (
	// TypeLocalDC is for storages that reside in the local datacenter.
	TypeLocalDC Type = iota
	// TypeRemoteDC is for storages that reside in a remote datacenter.
	TypeRemoteDC
	// TypeMultiDC is for storages that will aggregate multiple datacenters.
	TypeMultiDC
)

// ErrorBehavior describes what this storage type should do on error. This is
// used for determining how to proceed when encountering an error in a fanout
// storage situation.
type ErrorBehavior uint8

const (
	// BehaviorFail is for storages that should fail the entire query when queries
	// against this storage fail.
	BehaviorFail ErrorBehavior = iota
	// BehaviorWarn is for storages that should only warn of incomplete results on
	// failure.
	BehaviorWarn
	// BehaviorContainer is for storages that contain substorages. It is necessary
	// to look at the returned error to determine if it's a failing error or
	// a warning error.
	BehaviorContainer
)

// Storage provides an interface for reading and writing to the tsdb.
type Storage interface {
	Querier
	Appender
	// Type identifies the type of the underlying storage.
	Type() Type
	// Close is used to close the underlying storage and free up resources.
	Close() error
	// ErrorBehavior dictates what fanout storage should do when this storage
	// encounters an error.
	ErrorBehavior() ErrorBehavior
	// Name gives the plaintext name for this storage, used for logging purposes.
	Name() string
}
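
// Illustrative sketch, not part of the original file: one way a fanout layer
// could act on ErrorBehavior when a query against a sub-storage fails. The
// function name and the warn/fatal split are assumptions made for this
// example, not an existing API in this package.
func handleFanoutError(store Storage, err error) (warning bool, fatal error) {
	switch store.ErrorBehavior() {
	case BehaviorWarn:
		// Degrade gracefully: keep partial results and surface a warning.
		return true, nil
	case BehaviorContainer:
		// Container storages wrap sub-storages; the wrapped error must be
		// inspected to decide whether it is a warning or a hard failure.
		// That inspection is elided here.
		return false, fmt.Errorf("inspect error from container storage %s: %w", store.Name(), err)
	default:
		// BehaviorFail (and anything unknown) fails the entire query.
		return false, fmt.Errorf("storage %s failed query: %w", store.Name(), err)
	}
}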

// Query is an interface for an M3DB query.
type Query interface {
	fmt.Stringer
	// nolint
	query()
}

func (q *FetchQuery) query() {}
func (q *WriteQuery) query() {}

// FetchQuery represents the input query which is fetched from M3DB.
type FetchQuery struct {
	Raw         string
	TagMatchers models.Matchers `json:"matchers"`
	Start       time.Time       `json:"start"`
	End         time.Time       `json:"end"`
	Interval    time.Duration   `json:"interval"`
}

// FetchOptions represents the options for a fetch query.
type FetchOptions struct {
	// Remote is set when this fetch is originated by a remote grpc call.
	Remote bool
	// SeriesLimit is the maximum number of series to fetch while executing the query.
	SeriesLimit int
	// InstanceMultiple is how much to increase the per-database-instance series limit.
	InstanceMultiple float32
	// DocsLimit is the maximum number of docs to return.
	DocsLimit int
	// RangeLimit is the maximum time range to return.
	RangeLimit time.Duration
	// ReturnedSeriesLimit is the maximum number of series to return in the result.
	ReturnedSeriesLimit int
	// ReturnedDatapointsLimit is the maximum number of datapoints to return.
	ReturnedDatapointsLimit int
	// ReturnedSeriesMetadataLimit is the maximum number of series metadata to return.
	ReturnedSeriesMetadataLimit int
	// RequireExhaustive results in an error if the query exceeds the series limit.
	RequireExhaustive bool
	// RequireNoWait results in an error if the query execution must wait for permits.
	RequireNoWait bool
	// MaxMetricMetadataStats is the maximum number of metric metadata stats to return.
	MaxMetricMetadataStats int
	// BlockType is the block type that the fetch function returns.
	BlockType models.FetchedBlockType
	// FanoutOptions are the options for the fetch namespace fanout.
	FanoutOptions *FanoutOptions
	// RestrictQueryOptions restricts the fetch to a specific set of
	// conditions.
	RestrictQueryOptions *RestrictQueryOptions
	// Step is the configured step size.
	Step time.Duration
	// LookbackDuration if set overrides the default lookback duration.
	LookbackDuration *time.Duration
	// Scope is used to report metrics about the fetch.
	Scope tally.Scope
	// Timeout is the timeout for the request.
	Timeout time.Duration
	// ReadConsistencyLevel defines the read consistency for the fetch.
	ReadConsistencyLevel *topology.ReadConsistencyLevel
	// IterateEqualTimestampStrategy provides the conflict resolution strategy for the same timestamp.
	IterateEqualTimestampStrategy *encoding.IterateEqualTimestampStrategy
	// Source is the source for the query.
	Source []byte

	// RelatedQueryOptions describes the timespans of related queries, used to
	// align resolutions across them.
	RelatedQueryOptions *RelatedQueryOptions
}

// QueryTimespan represents the start and end time of a query.
type QueryTimespan struct {
	Start xtime.UnixNano
	End   xtime.UnixNano
}

// RelatedQueryOptions describes the timespan of any related queries the client
// might be making. This is used to align the resolution of returned data
// across all queries.
type RelatedQueryOptions struct {
	Timespans []QueryTimespan
}
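
// Illustrative sketch, not part of the original file: building a FetchQuery
// and a FetchOptions by hand. Field values are arbitrary, and the
// models.Matcher{Type, Name, Value} literal shape with models.MatchEqual is
// an assumption for the example; in practice these are usually constructed by
// the query engine and request handlers rather than written out like this.
func exampleFetchQuery(now time.Time) (*FetchQuery, *FetchOptions) {
	query := &FetchQuery{
		Raw: `up{job="api"}`,
		TagMatchers: models.Matchers{
			{Type: models.MatchEqual, Name: []byte("job"), Value: []byte("api")},
		},
		Start:    now.Add(-time.Hour),
		End:      now,
		Interval: 15 * time.Second,
	}
	options := &FetchOptions{
		SeriesLimit:       10000,
		DocsLimit:         50000,
		RequireExhaustive: true,
		Step:              15 * time.Second,
		Timeout:           30 * time.Second,
	}
	return query, options
}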

// FanoutOptions describes which namespaces should be fanned out to for
// the query.
type FanoutOptions struct {
	// FanoutUnaggregated describes the fanout options for
	// unaggregated namespaces.
	FanoutUnaggregated FanoutOption
	// FanoutAggregated describes the fanout options for
	// aggregated namespaces.
	FanoutAggregated FanoutOption
	// FanoutAggregatedOptimized describes the fanout options for the
	// aggregated namespace optimization.
	FanoutAggregatedOptimized FanoutOption
}

// FanoutOption describes the fanout option.
type FanoutOption uint

const (
	// FanoutDefault uses the default fanout behavior.
	FanoutDefault FanoutOption = iota
	// FanoutForceDisable forces disabling fanout.
	FanoutForceDisable
	// FanoutForceEnable forces enabling fanout.
	FanoutForceEnable
)

// RestrictByType are specific restrictions to stick to a single data type.
type RestrictByType struct {
	// MetricsType restricts the type of metrics being returned.
	MetricsType storagemetadata.MetricsType
	// StoragePolicy is required if the metrics type is not unaggregated,
	// to specify which storage policy metrics should be returned from.
	StoragePolicy policy.StoragePolicy
}

// RestrictByTag are specific restrictions to enforce behavior for given
// tags.
type RestrictByTag struct {
	// Restrict is a set of override matchers to apply to a fetch
	// regardless of the existing fetch matchers; they replace any
	// existing matchers that are part of a fetch if they collide.
	Restrict models.Matchers
	// Strip is a set of tag names to strip from the response.
	//
	// NB: If this is unset, but Restrict is set, all tag names appearing in any
	// of the Restrict matchers are removed.
	Strip [][]byte
}

// RestrictQueryOptions restricts the query to a specific set of conditions.
type RestrictQueryOptions struct {
	// RestrictByType are specific restrictions to stick to a single data type.
	RestrictByType *RestrictByType
	// RestrictByTag are specific restrictions to enforce behavior for given
	// tags.
	RestrictByTag *RestrictByTag
	// RestrictByTypes are specific restrictions to query from specified data
	// types.
	RestrictByTypes []*RestrictByType
}
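
// Illustrative sketch, not part of the original file: restricting a fetch to
// unaggregated data and overriding/stripping a tag in the results. The tag
// names are made up, and the models.Matcher literal shape is an assumption
// for the example.
func exampleRestrictOptions() *RestrictQueryOptions {
	return &RestrictQueryOptions{
		RestrictByType: &RestrictByType{
			// Unaggregated data does not require a storage policy.
			MetricsType: storagemetadata.UnaggregatedMetricsType,
		},
		RestrictByTag: &RestrictByTag{
			// Force the fetch to match only production series.
			Restrict: models.Matchers{
				{Type: models.MatchEqual, Name: []byte("env"), Value: []byte("prod")},
			},
			// Strip the override tag so it does not appear in the response.
			Strip: [][]byte{[]byte("env")},
		},
	}
}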

// Querier handles queries against a storage.
type Querier interface {
	// FetchProm fetches decompressed timeseries data based on a query in a
	// Prometheus-compatible format.
	// TODO: take in an accumulator of some sort rather than returning
	// necessarily as a Prom result.
	FetchProm(
		ctx context.Context,
		query *FetchQuery,
		options *FetchOptions,
	) (PromResult, error)

	// FetchBlocks fetches timeseries as blocks based on a query.
	FetchBlocks(
		ctx context.Context,
		query *FetchQuery,
		options *FetchOptions,
	) (block.Result, error)

	// FetchCompressed fetches compressed timeseries data based on a query.
	FetchCompressed(
		ctx context.Context,
		query *FetchQuery,
		options *FetchOptions,
	) (consolidators.MultiFetchResult, error)

	// SearchSeries returns series IDs matching the current query.
	SearchSeries(
		ctx context.Context,
		query *FetchQuery,
		options *FetchOptions,
	) (*SearchResults, error)

	// CompleteTags returns autocompleted tag results.
	CompleteTags(
		ctx context.Context,
		query *CompleteTagsQuery,
		options *FetchOptions,
	) (*consolidators.CompleteTagsResult, error)

	// QueryStorageMetadataAttributes returns the storage metadata
	// attributes for a query.
	QueryStorageMetadataAttributes(
		ctx context.Context,
		queryStart, queryEnd time.Time,
		opts *FetchOptions,
	) ([]storagemetadata.Attributes, error)
}

// WriteQuery represents the input timeseries that is written to the database.
// TODO: rename WriteQuery to WriteRequest or something similar.
type WriteQuery struct {
	// opts is kept as an unexported field so that the options stay
	// unexported and the Validate method on WriteQueryOptions can be reused.
	opts WriteQueryOptions
}

// WriteQueryOptions is a set of options used to construct a write query.
// These are passed as options so that they can be validated when creating
// a write query, which helps ensure a constructed write query is valid.
type WriteQueryOptions struct {
	Tags       models.Tags
	Datapoints ts.Datapoints
	Unit       xtime.Unit
	Annotation []byte
	Attributes storagemetadata.Attributes
}

// CompleteTagsQuery represents a query that returns an autocompleted
// set of tags.
type CompleteTagsQuery struct {
	// CompleteNameOnly indicates if the query should return only tag names, or
	// tag names and values.
	CompleteNameOnly bool
	// FilterNameTags is a list of tags to filter results by. If this is empty, no
	// filtering is applied.
	FilterNameTags [][]byte
	// TagMatchers is the search criteria for the query.
	TagMatchers models.Matchers
	// Start is the inclusive start for the query.
	Start xtime.UnixNano
	// End is the exclusive end for the query.
	End xtime.UnixNano
}

// SeriesMatchQuery represents a query that returns a set of series
// that match the query.
type SeriesMatchQuery struct {
	// TagMatchers is the search criteria for the query.
	TagMatchers []models.Matchers
	// Start is the inclusive start for the query.
	Start time.Time
	// End is the exclusive end for the query.
	End time.Time
}

// String returns a human-readable description of the completion query.
func (q *CompleteTagsQuery) String() string {
	if q.CompleteNameOnly {
		return fmt.Sprintf("completing tag name for query %s", q.TagMatchers)
	}

	return fmt.Sprintf("completing tag values for query %s", q.TagMatchers)
}

// Appender provides batched appends against a storage.
type Appender interface {
	// Write writes a batched set of datapoints to storage based on the provided
	// query.
	Write(ctx context.Context, query *WriteQuery) error
}

// SearchResults is the result from a search.
type SearchResults struct {
	// Metrics is the list of search results.
	Metrics models.Metrics
	// Metadata describes any metadata for the search operation.
	Metadata block.ResultMetadata
}

// FetchResult provides a decompressed fetch result and meta information.
type FetchResult struct {
	// SeriesList is the list of decompressed and computed series after fetch
	// query execution.
	SeriesList ts.SeriesList
	// Metadata describes any metadata for the operation.
	Metadata block.ResultMetadata
}

// PromResult is a Prometheus-compatible result type.
type PromResult struct {
	// PromResult is the result, in Prometheus protobuf format.
	PromResult *prompb.QueryResult
	// Metadata is the metadata for the result.
	Metadata block.ResultMetadata
}
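
// Illustrative sketch, not part of the original file: a tag-value completion
// query for the "instance" tag of a single metric over the last hour. The
// metric and tag names are arbitrary, and the models.Matcher literal shape is
// an assumption for the example.
func exampleCompleteTagsQuery(now time.Time) *CompleteTagsQuery {
	return &CompleteTagsQuery{
		// Return tag values, not just tag names.
		CompleteNameOnly: false,
		// Only complete values for the "instance" tag.
		FilterNameTags: [][]byte{[]byte("instance")},
		TagMatchers: models.Matchers{
			{Type: models.MatchEqual, Name: []byte("__name__"), Value: []byte("up")},
		},
		Start: xtime.ToUnixNano(now.Add(-time.Hour)),
		End:   xtime.ToUnixNano(now),
	}
}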

// PromConvertOptions are options controlling the conversion of raw series iterators
// to a Prometheus-compatible result.
type PromConvertOptions interface {
	// SetResolutionThresholdForCounterNormalization sets the resolution
	// starting from which (inclusive) counter value normalization is performed.
	SetResolutionThresholdForCounterNormalization(time.Duration) PromConvertOptions

	// ResolutionThresholdForCounterNormalization returns the resolution
	// starting from which (inclusive) counter value normalization is performed.
	ResolutionThresholdForCounterNormalization() time.Duration

	// SetValueDecreaseTolerance sets the relative tolerance against decoded time series value decrease.
	SetValueDecreaseTolerance(value float64) PromConvertOptions

	// ValueDecreaseTolerance returns the relative tolerance against decoded time series value decrease.
	ValueDecreaseTolerance() float64

	// SetValueDecreaseToleranceUntil sets the timestamp (exclusive) until which the tolerance applies.
	SetValueDecreaseToleranceUntil(value xtime.UnixNano) PromConvertOptions

	// ValueDecreaseToleranceUntil returns the timestamp (exclusive) until which the tolerance applies.
	ValueDecreaseToleranceUntil() xtime.UnixNano
}
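
// Illustrative sketch, not part of the original file: a minimal value type
// satisfying PromConvertOptions. The real implementation lives elsewhere in
// the query codebase; this only demonstrates the setter/getter chaining shape
// of the interface, with setters returning a modified copy.
type promConvertOptionsSketch struct {
	resolutionThreshold    time.Duration
	valueDecreaseTolerance float64
	toleranceUntil         xtime.UnixNano
}

func (o promConvertOptionsSketch) SetResolutionThresholdForCounterNormalization(
	t time.Duration,
) PromConvertOptions {
	o.resolutionThreshold = t
	return o
}

func (o promConvertOptionsSketch) ResolutionThresholdForCounterNormalization() time.Duration {
	return o.resolutionThreshold
}

func (o promConvertOptionsSketch) SetValueDecreaseTolerance(value float64) PromConvertOptions {
	o.valueDecreaseTolerance = value
	return o
}

func (o promConvertOptionsSketch) ValueDecreaseTolerance() float64 {
	return o.valueDecreaseTolerance
}

func (o promConvertOptionsSketch) SetValueDecreaseToleranceUntil(value xtime.UnixNano) PromConvertOptions {
	o.toleranceUntil = value
	return o
}

func (o promConvertOptionsSketch) ValueDecreaseToleranceUntil() xtime.UnixNano {
	return o.toleranceUntil
}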