github.com/siglens/siglens@v0.0.0-20240328180423-f7ce9ae441ed/pkg/segment/structs/metricsstructs.go

/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package structs

import (
    "math"
    "os"
    "path"
    "sync/atomic"

    pql "github.com/influxdata/promql/v2"

    dtu "github.com/siglens/siglens/pkg/common/dtypeutils"
    "github.com/siglens/siglens/pkg/segment/utils"
    toputils "github.com/siglens/siglens/pkg/utils"
    log "github.com/sirupsen/logrus"
)

/*
Struct to represent a single metrics query request.
*/
type MetricsQuery struct {
    MetricName      string // metric name to query for
    HashedMName     uint64
    Aggregator      Aggreation
    Downsampler     Downsampler
    TagsFilters     []*TagsFilter // all tag filters to apply
    SelectAllSeries bool          // flag to select all series - for PromQL

    reordered      bool   // whether the tag filters have been reordered
    numStarFilters int    // index such that TagsFilters[:numStarFilters] are all star filters
    OrgId          uint64 // organization id
}

type Aggreation struct {
    AggregatorFunction utils.AggregateFunctions // aggregator function
    RangeFunction      utils.RangeFunctions     // range function to apply; only one of these two will be set
    FuncConstant       float64
}

type Downsampler struct {
    Interval   int
    Unit       string
    CFlag      bool
    Aggregator Aggreation
}

/*
Represents a single tag filter for a metric query
*/
type TagsFilter struct {
    TagKey          string
    RawTagValue     interface{} // TODO: change to utils.DtypeEnclosure later
    HashTagValue    uint64
    TagOperator     utils.TagOperator
    LogicalOperator utils.LogicalOperator
}
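// Illustrative sketch (not part of the original file): one way a caller might
// populate MetricsQuery for a query along the lines of "some.metric, downsampled
// to 1m buckets, where the host tag can be anything". The metric name, tag key,
// and downsample settings here are placeholder assumptions; in real queries the
// Aggregator, TagOperator, and LogicalOperator fields would be filled in from
// the types and constants in pkg/segment/utils.
func exampleMetricsQuerySketch() MetricsQuery {
    return MetricsQuery{
        MetricName: "some.metric",
        Downsampler: Downsampler{
            Interval: 1,
            Unit:     "m", // 1-minute buckets; see GetIntervalTimeInSeconds below
        },
        TagsFilters: []*TagsFilter{
            {TagKey: "host", RawTagValue: "*"}, // "*" marks a wildcard (star) filter
        },
        SelectAllSeries: false,
    }
}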
type MetricsQueryResponse struct {
    MetricName string             `json:"metric"`
    Tags       map[string]string  `json:"tags"`
    Dps        map[uint32]float64 `json:"dps"`
}

type Label struct {
    Name, Value string
}

type Data struct {
    ResultType pql.ValueType `json:"resultType"`
    Result     []pql.Series  `json:"series,omitempty"`
}

type MetricsQueryResponsePromQl struct {
    Status    string   `json:"status"` // "success" or "error"
    Data      Data     `json:"data"`
    ErrorType string   `json:"errorType"`
    Error     string   `json:"error"`
    Warnings  []string `json:"warnings"`
}

/*
Struct to represent an arithmetic operation between metrics query results,
or between a query result and a constant
*/
type QueryArithmetic struct {
    OperationId uint64
    LHS         uint64
    RHS         uint64
    ConstantOp  bool
    Operation   utils.ArithmeticOperator
    Constant    float64
    // Maps group id to a map of timestamp to value. This aggregates DsResults
    // based on the aggregation function.
    Results       map[string]map[uint32]float64
    OperatedState bool // true if the operation has been executed
}

/*
Struct to represent the metrics query request and its corresponding timerange
*/
type MetricsQueryRequest struct {
    TimeRange    dtu.MetricsTimeRange
    MetricsQuery MetricsQuery
}

type OTSDBMetricsQueryExpTime struct {
    Start       interface{}                     `json:"start"`
    End         interface{}                     `json:"end"`
    Timezone    string                          `json:"timezone"`
    Aggregator  string                          `json:"aggregator"`
    Downsampler OTSDBMetricsQueryExpDownsampler `json:"downsampler"`
}

type OTSDBMetricsQueryExpDownsampler struct {
    Interval   string `json:"interval"`
    Aggregator string `json:"aggregator"`
}

type OTSDBMetricsQueryExpTags struct {
    Type    string `json:"type"`
    Tagk    string `json:"tagk"`
    Filter  string `json:"filter"`
    GroupBy bool   `json:"groupBy"`
}

type OTSDBMetricsQueryExpFilter struct {
    Tags []OTSDBMetricsQueryExpTags `json:"tags"`
    Id   string                     `json:"id"`
}

type OTSDBMetricsQueryExpMetric struct {
    Id         string            `json:"id"`
    MetricName string            `json:"metric"`
    Filter     string            `json:"filter"`
    Aggregator string            `json:"aggregator"`
    FillPolicy map[string]string `json:"fillPolicy"`
}

type OTSDBMetricsQueryExpressions struct {
    Id  string `json:"id"`
    Exp string `json:"exp"`
}

type OTSDBMetricsQueryExpOutput struct {
    Id    string `json:"id"`
    Alias string `json:"alias"`
}

type OTSDBMetricsQueryExpRequest struct {
    Time        OTSDBMetricsQueryExpTime       `json:"time"`
    Filters     []OTSDBMetricsQueryExpFilter   `json:"filters"`
    Metrics     []OTSDBMetricsQueryExpMetric   `json:"metrics"`
    Expressions []OTSDBMetricsQueryExpressions `json:"expressions"`
    Outputs     []OTSDBMetricsQueryExpOutput   `json:"outputs"`
}

type MetricsSearchRequest struct {
    MetricsKeyBaseDir string
    BlocksToSearch    map[uint16]bool
    Parallelism       uint
    QueryType         SegType
    AllTagKeys        map[string]bool
}

/*
NOTE: Change the value of SIZE_OF_MBSUM each time this struct is updated
*/
type MBlockSummary struct {
    Blknum uint16
    HighTs uint32
    LowTs  uint32
}

func (mbs *MBlockSummary) Reset() {
    mbs.Blknum = 0
    mbs.HighTs = 0
    mbs.LowTs = math.MaxUint32
}

/*
Reorders the tags filters so that they appear in the following order:
 1. "*" (star) tag filters
 2. all other tag filters
*/
func (mq *MetricsQuery) ReorderTagFilters() {
    if mq.reordered {
        return
    }
    starTags := make([]*TagsFilter, 0, len(mq.TagsFilters))
    otherTags := make([]*TagsFilter, 0, len(mq.TagsFilters))
    for _, tf := range mq.TagsFilters {
        if tagVal, ok := tf.RawTagValue.(string); ok && tagVal == "*" {
            starTags = append(starTags, tf)
        } else {
            otherTags = append(otherTags, tf)
        }
    }
    mq.TagsFilters = append(starTags, otherTags...)
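    // Star filters are now grouped at the front; record how many there are so
    // GetNumStarFilters can return the split point without rescanning the slice.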
    mq.reordered = true
    mq.numStarFilters = len(starTags)
}

func (mq *MetricsQuery) GetNumStarFilters() int {
    mq.ReorderTagFilters()
    return mq.numStarFilters
}

const SIZE_OF_MBSUM = 10 // 2 + 4 + 4

// GetIntervalTimeInSeconds converts the downsampler's interval and unit into a
// number of seconds; it returns 0 if the unit is not recognized.
func (ds *Downsampler) GetIntervalTimeInSeconds() uint32 {
    intervalTime := uint32(0)
    switch ds.Unit {
    case "s":
        intervalTime = 1
    case "m":
        intervalTime = 60
    case "h":
        intervalTime = 3600
    case "d":
        intervalTime = 86400
    case "w":
        intervalTime = 86400 * 7
    case "n":
        intervalTime = 86400 * 30
    case "y":
        intervalTime = 86400 * 365
    default:
        log.Errorf("GetIntervalTimeInSeconds: invalid downsampler unit %q", ds.Unit)
        return 0
    }

    return uint32(ds.Interval) * intervalTime
}

/*
Format of the block summary file:
[version - 1 byte][blk num - 2 bytes][high ts - 4 bytes][low ts - 4 bytes]
*/
func (mbs *MBlockSummary) FlushSummary(fName string) ([]byte, error) {
    newFile := false
    if _, err := os.Stat(fName); os.IsNotExist(err) {
        err := os.MkdirAll(path.Dir(fName), os.FileMode(0764))
        newFile = true
        if err != nil {
            log.Errorf("FlushSummary: failed to create directory at %s: %v", path.Dir(fName), err)
            return nil, err
        }
    }
    fd, err := os.OpenFile(fName, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
    if err != nil {
        log.Errorf("FlushSummary: open failed fname=%v, err=%v", fName, err)
        return nil, err
    }
    defer fd.Close()
    idx := 0
    var mBlkSum []byte
    if newFile {
        // [version][blk num][high Ts][low Ts] = 1 + SIZE_OF_MBSUM bytes when the file is first created
        mBlkSum = make([]byte, 1+SIZE_OF_MBSUM)
        copy(mBlkSum[idx:], utils.VERSION_MBLOCKSUMMARY)
        idx += 1
    } else {
        // [blk num][high Ts][low Ts] = SIZE_OF_MBSUM bytes for subsequent appends
        mBlkSum = make([]byte, SIZE_OF_MBSUM)
    }
    copy(mBlkSum[idx:], toputils.Uint16ToBytesLittleEndian(mbs.Blknum))
    idx += 2
    copy(mBlkSum[idx:], toputils.Uint32ToBytesLittleEndian(mbs.HighTs))
    idx += 4
    copy(mBlkSum[idx:], toputils.Uint32ToBytesLittleEndian(mbs.LowTs))

    if _, err := fd.Write(mBlkSum); err != nil {
        log.Errorf("FlushSummary: write failed blockSummaryFname=%v, err=%v", fName, err)
        return nil, err
    }
    return mBlkSum, nil
}

// UpdateTimeRange widens the block's [LowTs, HighTs] range to include ts.
func (mbs *MBlockSummary) UpdateTimeRange(ts uint32) {
    if ts > mbs.HighTs {
        atomic.StoreUint32(&mbs.HighTs, ts)
    }
    if ts < mbs.LowTs {
        atomic.StoreUint32(&mbs.LowTs, ts)
    }
}
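// Illustrative sketch (not part of the original file): assumed usage of the
// helpers above. A 5m downsampler resolves to 300 seconds, and a freshly Reset
// MBlockSummary widens its [LowTs, HighTs] range as timestamps are observed.
// The timestamp values are arbitrary placeholders.
func exampleDownsampleAndSummarySketch() {
    ds := Downsampler{Interval: 5, Unit: "m"}
    intervalSecs := ds.GetIntervalTimeInSeconds() // 5 * 60 = 300

    var mbs MBlockSummary
    mbs.Reset() // HighTs = 0, LowTs = math.MaxUint32
    mbs.UpdateTimeRange(1_700_000_010)
    mbs.UpdateTimeRange(1_700_000_000) // lowers LowTs, leaves HighTs untouched

    log.Debugf("interval=%vs block range=[%v, %v]", intervalSecs, mbs.LowTs, mbs.HighTs)
}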