github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/query/storage/prom_converter.go

// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package storage

import (
	"context"
	"sync"
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/generated/proto/annotation"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/query/block"
	"github.com/m3db/m3/src/query/generated/proto/prompb"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage/m3/consolidators"
	xerrors "github.com/m3db/m3/src/x/errors"
	xsync "github.com/m3db/m3/src/x/sync"
	xtime "github.com/m3db/m3/src/x/time"
)

const initRawFetchAllocSize = 32

func iteratorToPromResult(
	iter encoding.SeriesIterator,
	tags models.Tags,
	maxResolution time.Duration,
	promConvertOptions PromConvertOptions,
) (*prompb.TimeSeries, error) {
	var (
		resolution          = xtime.UnixNano(maxResolution)
		resolutionThreshold = promConvertOptions.ResolutionThresholdForCounterNormalization()

		valueDecreaseTolerance      = promConvertOptions.ValueDecreaseTolerance()
		valueDecreaseToleranceUntil = promConvertOptions.ValueDecreaseToleranceUntil()

		firstDP           = true
		handleResets      = false
		annotationPayload annotation.Payload

		cumulativeSum float64
		prevDP        ts.Datapoint

		samples = make([]prompb.Sample, 0, initRawFetchAllocSize)
	)

	for iter.Next() {
		dp, _, _ := iter.Current()

		if valueDecreaseTolerance > 0 && dp.TimestampNanos.Before(valueDecreaseToleranceUntil) {
			if !firstDP && dp.Value < prevDP.Value && dp.Value > prevDP.Value*(1-valueDecreaseTolerance) {
				dp.Value = prevDP.Value
			}
		}

		if firstDP && maxResolution >= resolutionThreshold {
			firstAnnotation := iter.FirstAnnotation()
			if len(firstAnnotation) > 0 {
				if err := annotationPayload.Unmarshal(firstAnnotation); err != nil {
					return nil, err
				}
				handleResets = annotationPayload.OpenMetricsHandleValueResets
			}
		}

		if handleResets {
			if dp.TimestampNanos/resolution != prevDP.TimestampNanos/resolution && !firstDP {
				// reached next resolution window, emit previous DP
				samples = append(samples, prompb.Sample{
					Timestamp: TimeToPromTimestamp(prevDP.TimestampNanos),
					Value:     cumulativeSum,
				})
			}

			if dp.Value < prevDP.Value { // counter reset
				cumulativeSum += dp.Value
			} else {
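				// No reset detected: accumulate only the increase since the
				// previous datapoint so the emitted value stays cumulative.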
				cumulativeSum += dp.Value - prevDP.Value
			}
		} else {
			samples = append(samples, prompb.Sample{
				Timestamp: TimeToPromTimestamp(dp.TimestampNanos),
				Value:     dp.Value,
			})
		}

		prevDP = dp
		firstDP = false
	}

	if err := iter.Err(); err != nil {
		return nil, err
	}

	if handleResets {
		samples = append(samples, prompb.Sample{
			Timestamp: TimeToPromTimestamp(prevDP.TimestampNanos),
			Value:     cumulativeSum,
		})
	}

	return &prompb.TimeSeries{
		Labels:  TagsToPromLabels(tags),
		Samples: samples,
	}, nil
}

// Fall back to sequential decompression if unable to decompress concurrently.
func toPromSequentially(
	fetchResult consolidators.SeriesFetchResult,
	tagOptions models.TagOptions,
	maxResolution time.Duration,
	promConvertOptions PromConvertOptions,
	fetchOptions *FetchOptions,
) (PromResult, error) {
	meta := block.NewResultMetadata()
	count := fetchResult.Count()
	seriesList := make([]*prompb.TimeSeries, 0, count)
	for i := 0; i < count; i++ {
		iter, tags, err := fetchResult.IterTagsAtIndex(i, tagOptions)
		if err != nil {
			return PromResult{}, err
		}

		series, err := iteratorToPromResult(iter, tags, maxResolution, promConvertOptions)
		if err != nil {
			return PromResult{}, err
		}

		if len(series.GetSamples()) > 0 {
			seriesList = append(seriesList, series)
		}

		if fetchOptions != nil && fetchOptions.MaxMetricMetadataStats > 0 {
			name, _ := tags.Get(promDefaultName)
			if len(series.GetSamples()) > 0 {
				meta.ByName(name).WithSamples++
			} else {
				meta.ByName(name).NoSamples++
			}
		}
	}

	return PromResult{
		PromResult: &prompb.QueryResult{
			Timeseries: seriesList,
		},
		Metadata: meta,
	}, nil
}

// toPromConcurrently converts the fetched series iterators to Prometheus
// series in parallel using the read worker pool.
func toPromConcurrently(
	ctx context.Context,
	fetchResult consolidators.SeriesFetchResult,
	readWorkerPool xsync.PooledWorkerPool,
	tagOptions models.TagOptions,
	maxResolution time.Duration,
	promConvertOptions PromConvertOptions,
	fetchOptions *FetchOptions,
) (PromResult, error) {
	count := fetchResult.Count()
	var (
		seriesList = make([]*prompb.TimeSeries, count)

		wg       sync.WaitGroup
		multiErr xerrors.MultiError
		mu       sync.Mutex
	)

	fastWorkerPool := readWorkerPool.FastContextCheck(100)
	for i := 0; i < count; i++ {
		i := i

		iter, tags, err := fetchResult.IterTagsAtIndex(i, tagOptions)
		if err != nil {
			mu.Lock()
			multiErr = multiErr.Add(err)
			mu.Unlock()
			break
		}

		wg.Add(1)
		available := fastWorkerPool.GoWithContext(ctx, func() {
			defer wg.Done()
			series, err := iteratorToPromResult(iter, tags, maxResolution, promConvertOptions)
			if err != nil {
				mu.Lock()
				multiErr = multiErr.Add(err)
				mu.Unlock()
			}

			seriesList[i] = series
		})
		if !available {
			wg.Done()
			mu.Lock()
			multiErr = multiErr.Add(ctx.Err())
			mu.Unlock()
			break
		}
	}

	wg.Wait()
	if err := multiErr.LastError(); err != nil {
		return PromResult{}, err
	}

	// Filter out empty series in place.
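	// filteredList shares the backing array of seriesList, so dropping series
	// without samples requires no additional allocation.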
	meta := block.NewResultMetadata()
	filteredList := seriesList[:0]
	for _, series := range seriesList {
		if len(series.GetSamples()) > 0 {
			filteredList = append(filteredList, series)
		}

		if fetchOptions != nil && fetchOptions.MaxMetricMetadataStats > 0 {
			name := metricNameFromLabels(series.Labels)
			if len(series.GetSamples()) > 0 {
				meta.ByName(name).WithSamples++
			} else {
				meta.ByName(name).NoSamples++
			}
		}
	}

	return PromResult{
		PromResult: &prompb.QueryResult{
			Timeseries: filteredList,
		},
		Metadata: meta,
	}, nil
}

func seriesIteratorsToPromResult(
	ctx context.Context,
	fetchResult consolidators.SeriesFetchResult,
	readWorkerPool xsync.PooledWorkerPool,
	tagOptions models.TagOptions,
	maxResolution time.Duration,
	promConvertOptions PromConvertOptions,
	fetchOptions *FetchOptions,
) (PromResult, error) {
	if readWorkerPool == nil {
		return toPromSequentially(fetchResult, tagOptions, maxResolution,
			promConvertOptions, fetchOptions)
	}

	return toPromConcurrently(ctx, fetchResult, readWorkerPool, tagOptions, maxResolution,
		promConvertOptions, fetchOptions)
}

// SeriesIteratorsToPromResult converts raw series iterators directly to a
// Prometheus-compatible result.
func SeriesIteratorsToPromResult(
	ctx context.Context,
	fetchResult consolidators.SeriesFetchResult,
	readWorkerPool xsync.PooledWorkerPool,
	tagOptions models.TagOptions,
	promConvertOptions PromConvertOptions,
	fetchOptions *FetchOptions,
) (PromResult, error) {
	defer fetchResult.Close()
	if err := fetchResult.Verify(); err != nil {
		return PromResult{}, err
	}

	var maxResolution time.Duration
	for _, res := range fetchResult.Metadata.Resolutions {
		if res > maxResolution {
			maxResolution = res
		}
	}

	promResult, err := seriesIteratorsToPromResult(ctx, fetchResult,
		readWorkerPool, tagOptions, maxResolution, promConvertOptions, fetchOptions)
	// Combine the fetchResult metadata into any metadata that was already
	// computed for this promResult.
	promResult.Metadata = promResult.Metadata.CombineMetadata(fetchResult.Metadata)

	return promResult, err
}
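
// Illustrative usage of the exported entry point above; a minimal sketch for
// documentation only. It assumes ctx, fetchResult, readWorkerPool, tagOpts,
// convertOpts and fetchOpts are supplied by the surrounding fetch path rather
// than constructed here.
//
//	promResult, err := SeriesIteratorsToPromResult(
//		ctx, fetchResult, readWorkerPool, tagOpts, convertOpts, fetchOpts)
//	if err != nil {
//		return err
//	}
//	for _, series := range promResult.PromResult.GetTimeseries() {
//		// Each entry carries Prometheus-compatible labels and samples,
//		// with empty series already filtered out.
//		_ = series
//	}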