github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/pkg/configs/legacy_promql/functions.go

// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package promql

import (
	"fmt"
	"math"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/pkg/labels"
)

// Function represents a function of the expression language and is
// used by function nodes.
type Function struct {
	Name       string
	ArgTypes   []ValueType
	Variadic   int
	ReturnType ValueType

	// vals is a list of the evaluated arguments for the function call.
	// For range vectors it will be a Matrix with one series, for instant
	// vectors a Vector, and for scalars a Vector with one series whose value
	// is the scalar value; it is nil for strings.
	// args are the original arguments to the function, where you can access
	// matrixSelectors, vectorSelectors, and StringLiterals.
	// enh.out is a pre-allocated empty vector that you may use to accumulate
	// output before returning it. The vectors in vals should not be returned.
	// Range vector functions need only return a vector with the right values;
	// the metrics and timestamps are not needed.
	// Instant vector functions need only return a vector with the right values
	// and metrics; the timestamps are not needed.
	// Scalar results should be returned as the value of a sample in a Vector.
	Call func(vals []Value, args Expressions, enh *EvalNodeHelper) Vector
}

// === time() float64 ===
func funcTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return Vector{Sample{Point: Point{
		V: float64(enh.ts) / 1000,
	}}}
}

// extrapolatedRate is a utility function for rate/increase/delta.
// It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall.
func extrapolatedRate(vals []Value, args Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector {
	ms := args[0].(*MatrixSelector)

	var (
		matrix     = vals[0].(Matrix)
		rangeStart = enh.ts - durationMilliseconds(ms.Range+ms.Offset)
		rangeEnd   = enh.ts - durationMilliseconds(ms.Offset)
	)

	for _, samples := range matrix {
		// No sense in trying to compute a rate without at least two points. Drop
		// this Vector element.
		if len(samples.Points) < 2 {
			continue
		}
		var (
			counterCorrection float64
			lastValue         float64
		)
		for _, sample := range samples.Points {
			if isCounter && sample.V < lastValue {
				counterCorrection += lastValue
			}
			lastValue = sample.V
		}
		resultValue := lastValue - samples.Points[0].V + counterCorrection
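		// Worked example (illustrative, not part of the original source):
		// for a 60s range with samples 5s apart whose first and last points
		// each sit 5s inside the range boundaries, sampledInterval is 50s and
		// averageDurationBetweenSamples is 5s. Both boundary gaps (5s) fall
		// below the 5.5s extrapolation threshold, so extrapolateToInterval
		// becomes 60s and the raw delta computed above is scaled by
		// 60/50 = 1.2 (and further divided by 60 for rate()).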

		// Duration between first/last samples and boundary of range.
		durationToStart := float64(samples.Points[0].T-rangeStart) / 1000
		durationToEnd := float64(rangeEnd-samples.Points[len(samples.Points)-1].T) / 1000

		sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000
		averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1)

		if isCounter && resultValue > 0 && samples.Points[0].V >= 0 {
			// Counters cannot be negative. If we have any slope at
			// all (i.e. resultValue went up), we can extrapolate
			// the zero point of the counter. If the duration to the
			// zero point is shorter than the durationToStart, we
			// take the zero point as the start of the series,
			// thereby avoiding extrapolation to negative counter
			// values.
			durationToZero := sampledInterval * (samples.Points[0].V / resultValue)
			if durationToZero < durationToStart {
				durationToStart = durationToZero
			}
		}

		// If the first/last samples are close to the boundaries of the range,
		// extrapolate the result. We do this because we expect that another
		// sample will exist given the spacing between samples we've seen thus
		// far, with an allowance for noise.
		extrapolationThreshold := averageDurationBetweenSamples * 1.1
		extrapolateToInterval := sampledInterval

		if durationToStart < extrapolationThreshold {
			extrapolateToInterval += durationToStart
		} else {
			extrapolateToInterval += averageDurationBetweenSamples / 2
		}
		if durationToEnd < extrapolationThreshold {
			extrapolateToInterval += durationToEnd
		} else {
			extrapolateToInterval += averageDurationBetweenSamples / 2
		}
		resultValue = resultValue * (extrapolateToInterval / sampledInterval)
		if isRate {
			resultValue = resultValue / ms.Range.Seconds()
		}

		enh.out = append(enh.out, Sample{
			Point: Point{V: resultValue},
		})
	}
	return enh.out
}

// === delta(Matrix ValueTypeMatrix) Vector ===
func funcDelta(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return extrapolatedRate(vals, args, enh, false, false)
}

// === rate(node ValueTypeMatrix) Vector ===
func funcRate(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return extrapolatedRate(vals, args, enh, true, true)
}

// === increase(node ValueTypeMatrix) Vector ===
func funcIncrease(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return extrapolatedRate(vals, args, enh, true, false)
}

// === irate(node ValueTypeMatrix) Vector ===
func funcIrate(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return instantValue(vals, enh.out, true)
}

// === idelta(node model.ValMatrix) Vector ===
func funcIdelta(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return instantValue(vals, enh.out, false)
}
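// Illustrative example (not part of the original source): irate() and idelta()
// only look at the last two samples of each series in the range, handled by
// instantValue below. If a counter was 100 at t=10s and 103 at t=20s, irate()
// reports 0.3/s and idelta() reports 3, regardless of any earlier samples in
// the window.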

func instantValue(vals []Value, out Vector, isRate bool) Vector {
	for _, samples := range vals[0].(Matrix) {
		// No sense in trying to compute a rate without at least two points. Drop
		// this Vector element.
		if len(samples.Points) < 2 {
			continue
		}

		lastSample := samples.Points[len(samples.Points)-1]
		previousSample := samples.Points[len(samples.Points)-2]

		var resultValue float64
		if isRate && lastSample.V < previousSample.V {
			// Counter reset.
			resultValue = lastSample.V
		} else {
			resultValue = lastSample.V - previousSample.V
		}

		sampledInterval := lastSample.T - previousSample.T
		if sampledInterval == 0 {
			// Avoid dividing by 0.
			continue
		}

		if isRate {
			// Convert to per-second.
			resultValue /= float64(sampledInterval) / 1000
		}

		out = append(out, Sample{
			Point: Point{V: resultValue},
		})
	}
	return out
}

// calcTrendValue calculates the trend value at the given index i in the raw
// data. This is somewhat analogous to the slope of the trend at that index.
// sf is the smoothing factor, tf the trend factor, s0 and s1 the previous and
// current smoothed values, and b the previous trend value.
func calcTrendValue(i int, sf, tf, s0, s1, b float64) float64 {
	if i == 0 {
		return b
	}

	x := tf * (s1 - s0)
	y := (1 - tf) * b

	return x + y
}

// Holt-Winters is similar to a weighted moving average, where historical data
// has exponentially less influence on the current data. Holt-Winters also
// accounts for trends in data. The smoothing factor (0 < sf < 1) affects how
// historical data influences the current data: a lower smoothing factor
// increases the influence of historical data. The trend factor (0 < tf < 1)
// affects how trends in historical data influence the current data: a higher
// trend factor increases the influence of trends. Algorithm taken from
// https://en.wikipedia.org/wiki/Exponential_smoothing, section "Double exponential smoothing".
func funcHoltWinters(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	mat := vals[0].(Matrix)

	// The smoothing factor argument.
	sf := vals[1].(Vector)[0].V

	// The trend factor argument.
	tf := vals[2].(Vector)[0].V

	// Sanity check the input.
	if sf <= 0 || sf >= 1 {
		panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf))
	}
	if tf <= 0 || tf >= 1 {
		panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf))
	}

	var l int
	for _, samples := range mat {
		l = len(samples.Points)

		// Can't do the smoothing operation with less than two points.
		if l < 2 {
			continue
		}

		var s0, s1, b float64
		// Set initial values.
		s1 = samples.Points[0].V
		b = samples.Points[1].V - samples.Points[0].V

		// Run the smoothing operation.
		var x, y float64
		for i := 1; i < l; i++ {

			// Scale the raw value against the smoothing factor.
			x = sf * samples.Points[i].V

			// Scale the last smoothed value with the trend at this point.
			b = calcTrendValue(i-1, sf, tf, s0, s1, b)
			y = (1 - sf) * (s1 + b)

			s0, s1 = s1, x+y
		}

		enh.out = append(enh.out, Sample{
			Point: Point{V: s1},
		})
	}

	return enh.out
}
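// For reference (added for illustration, not part of the original source), the
// smoothing loop in funcHoltWinters above implements the standard double
// exponential smoothing recurrence, with x_i the raw sample values:
//
//	s_i = sf*x_i + (1-sf)*(s_{i-1} + b_{i-1})
//	b_i = tf*(s_i - s_{i-1}) + (1-tf)*b_{i-1}
//
// and the final smoothed value s_{l-1} is emitted for each series.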

// === sort(node ValueTypeVector) Vector ===
func funcSort(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	// NaN should sort to the bottom, so take descending sort with NaN first and
	// reverse it.
	byValueSorter := vectorByReverseValueHeap(vals[0].(Vector))
	sort.Sort(sort.Reverse(byValueSorter))
	return Vector(byValueSorter)
}

// === sortDesc(node ValueTypeVector) Vector ===
func funcSortDesc(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	// NaN should sort to the bottom, so take ascending sort with NaN first and
	// reverse it.
	byValueSorter := vectorByValueHeap(vals[0].(Vector))
	sort.Sort(sort.Reverse(byValueSorter))
	return Vector(byValueSorter)
}

// === clamp_max(Vector ValueTypeVector, max Scalar) Vector ===
func funcClampMax(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	vec := vals[0].(Vector)
	max := vals[1].(Vector)[0].Point.V
	for _, el := range vec {
		enh.out = append(enh.out, Sample{
			Metric: enh.dropMetricName(el.Metric),
			Point:  Point{V: math.Min(max, el.V)},
		})
	}
	return enh.out
}

// === clamp_min(Vector ValueTypeVector, min Scalar) Vector ===
func funcClampMin(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	vec := vals[0].(Vector)
	min := vals[1].(Vector)[0].Point.V
	for _, el := range vec {
		enh.out = append(enh.out, Sample{
			Metric: enh.dropMetricName(el.Metric),
			Point:  Point{V: math.Max(min, el.V)},
		})
	}
	return enh.out
}

// === round(Vector ValueTypeVector, toNearest=1 Scalar) Vector ===
func funcRound(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	vec := vals[0].(Vector)
	// round returns a number rounded to toNearest.
	// Ties are solved by rounding up.
	toNearest := float64(1)
	if len(args) >= 2 {
		toNearest = vals[1].(Vector)[0].Point.V
	}
	// Invert as it seems to cause fewer floating point accuracy issues.
	toNearestInverse := 1.0 / toNearest

	for _, el := range vec {
		v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse
		enh.out = append(enh.out, Sample{
			Metric: enh.dropMetricName(el.Metric),
			Point:  Point{V: v},
		})
	}
	return enh.out
}
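// Illustrative examples (not part of the original source): with the default
// toNearest=1, round(2.5) yields 3 and round(-2.5) yields -2, because ties are
// rounded up (toward +Inf) via floor(x+0.5). With toNearest=5, a value of 12.5
// rounds to 15.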

// === Scalar(node ValueTypeVector) Scalar ===
func funcScalar(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	v := vals[0].(Vector)
	if len(v) != 1 {
		return append(enh.out, Sample{
			Point: Point{V: math.NaN()},
		})
	}
	return append(enh.out, Sample{
		Point: Point{V: v[0].V},
	})
}

func aggrOverTime(vals []Value, enh *EvalNodeHelper, aggrFn func([]Point) float64) Vector {
	mat := vals[0].(Matrix)

	for _, el := range mat {
		if len(el.Points) == 0 {
			continue
		}

		enh.out = append(enh.out, Sample{
			Point: Point{V: aggrFn(el.Points)},
		})
	}
	return enh.out
}

// === avg_over_time(Matrix ValueTypeMatrix) Vector ===
func funcAvgOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return aggrOverTime(vals, enh, func(values []Point) float64 {
		var sum float64
		for _, v := range values {
			sum += v.V
		}
		return sum / float64(len(values))
	})
}

// === count_over_time(Matrix ValueTypeMatrix) Vector ===
func funcCountOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return aggrOverTime(vals, enh, func(values []Point) float64 {
		return float64(len(values))
	})
}

// === max_over_time(Matrix ValueTypeMatrix) Vector ===
func funcMaxOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return aggrOverTime(vals, enh, func(values []Point) float64 {
		max := math.Inf(-1)
		for _, v := range values {
			max = math.Max(max, v.V)
		}
		return max
	})
}

// === min_over_time(Matrix ValueTypeMatrix) Vector ===
func funcMinOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return aggrOverTime(vals, enh, func(values []Point) float64 {
		min := math.Inf(1)
		for _, v := range values {
			min = math.Min(min, v.V)
		}
		return min
	})
}

// === sum_over_time(Matrix ValueTypeMatrix) Vector ===
func funcSumOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return aggrOverTime(vals, enh, func(values []Point) float64 {
		var sum float64
		for _, v := range values {
			sum += v.V
		}
		return sum
	})
}

// === quantile_over_time(Matrix ValueTypeMatrix) Vector ===
func funcQuantileOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	q := vals[0].(Vector)[0].V
	mat := vals[1].(Matrix)

	for _, el := range mat {
		if len(el.Points) == 0 {
			continue
		}

		values := make(vectorByValueHeap, 0, len(el.Points))
		for _, v := range el.Points {
			values = append(values, Sample{Point: Point{V: v.V}})
		}
		enh.out = append(enh.out, Sample{
			Point: Point{V: quantile(q, values)},
		})
	}
	return enh.out
}

// === stddev_over_time(Matrix ValueTypeMatrix) Vector ===
func funcStddevOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return aggrOverTime(vals, enh, func(values []Point) float64 {
		var sum, squaredSum, count float64
		for _, v := range values {
			sum += v.V
			squaredSum += v.V * v.V
			count++
		}
		avg := sum / count
		return math.Sqrt(squaredSum/count - avg*avg)
	})
}

// === stdvar_over_time(Matrix ValueTypeMatrix) Vector ===
func funcStdvarOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return aggrOverTime(vals, enh, func(values []Point) float64 {
		var sum, squaredSum, count float64
		for _, v := range values {
			sum += v.V
			squaredSum += v.V * v.V
			count++
		}
		avg := sum / count
		return squaredSum/count - avg*avg
	})
}

// === absent(Vector ValueTypeVector) Vector ===
func funcAbsent(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	if len(vals[0].(Vector)) > 0 {
		return enh.out
	}
	m := []labels.Label{}

	if vs, ok := args[0].(*VectorSelector); ok {
		for _, ma := range vs.LabelMatchers {
			if ma.Type == labels.MatchEqual && ma.Name != labels.MetricName {
				m = append(m, labels.Label{Name: ma.Name, Value: ma.Value})
			}
		}
	}
	return append(enh.out,
		Sample{
			Metric: labels.New(m...),
			Point:  Point{V: 1},
		})
}

func simpleFunc(vals []Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
	for _, el := range vals[0].(Vector) {
		enh.out = append(enh.out, Sample{
			Metric: enh.dropMetricName(el.Metric),
			Point:  Point{V: f(el.V)},
		})
	}
	return enh.out
}

// === abs(Vector ValueTypeVector) Vector ===
func funcAbs(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Abs)
}

// === ceil(Vector ValueTypeVector) Vector ===
func funcCeil(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Ceil)
}

// === floor(Vector ValueTypeVector) Vector ===
func funcFloor(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Floor)
}

// === exp(Vector ValueTypeVector) Vector ===
func funcExp(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Exp)
}

// === sqrt(Vector ValueTypeVector) Vector ===
func funcSqrt(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Sqrt)
}

// === ln(Vector ValueTypeVector) Vector ===
func funcLn(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Log)
}

// === log2(Vector ValueTypeVector) Vector ===
func funcLog2(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Log2)
}

// === log10(Vector ValueTypeVector) Vector ===
func funcLog10(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Log10)
}

// === timestamp(Vector ValueTypeVector) Vector ===
func funcTimestamp(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	vec := vals[0].(Vector)
	for _, el := range vec {
		enh.out = append(enh.out, Sample{
			Metric: enh.dropMetricName(el.Metric),
			Point:  Point{V: float64(el.T) / 1000},
		})
	}
	return enh.out
}

// linearRegression performs a least-squares linear regression analysis on the
// provided SamplePairs. It returns the slope, and the intercept value at the
// provided time.
func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) {
	var (
		n            float64
		sumX, sumY   float64
		sumXY, sumX2 float64
	)
	for _, sample := range samples {
		x := float64(sample.T-interceptTime) / 1e3
		n += 1.0
		sumY += sample.V
		sumX += x
		sumXY += x * sample.V
		sumX2 += x * x
	}
	covXY := sumXY - sumX*sumY/n
	varX := sumX2 - sumX*sumX/n

	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}

// === deriv(node ValueTypeMatrix) Vector ===
func funcDeriv(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	mat := vals[0].(Matrix)

	for _, samples := range mat {
		// No sense in trying to compute a derivative without at least two points.
		// Drop this Vector element.
		if len(samples.Points) < 2 {
			continue
		}

		// We pass in an arbitrary timestamp that is near the values in use
		// to avoid floating point accuracy issues, see
		// https://github.com/prometheus/prometheus/issues/2674
		slope, _ := linearRegression(samples.Points, samples.Points[0].T)
		enh.out = append(enh.out, Sample{
			Point: Point{V: slope},
		})
	}
	return enh.out
}

// === predict_linear(node ValueTypeMatrix, k ValueTypeScalar) Vector ===
func funcPredictLinear(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	mat := vals[0].(Matrix)
	duration := vals[1].(Vector)[0].V

	for _, samples := range mat {
		// No sense in trying to predict anything without at least two points.
		// Drop this Vector element.
		if len(samples.Points) < 2 {
			continue
		}
		slope, intercept := linearRegression(samples.Points, enh.ts)

		enh.out = append(enh.out, Sample{
			Point: Point{V: slope*duration + intercept},
		})
	}
	return enh.out
}
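// Illustrative usage (not part of the original source; the metric name is just
// an example):
//
//	predict_linear(node_filesystem_free_bytes[1h], 4*3600)
//
// fits a least-squares line through the samples of the last hour and evaluates
// it 4 hours past the current evaluation timestamp, since the regression
// intercept above is taken at enh.ts.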

// === histogram_quantile(k ValueTypeScalar, Vector ValueTypeVector) Vector ===
func funcHistogramQuantile(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	q := vals[0].(Vector)[0].V
	inVec := vals[1].(Vector)
	sigf := enh.signatureFunc(false, excludedLabels...)

	if enh.signatureToMetricWithBuckets == nil {
		enh.signatureToMetricWithBuckets = map[uint64]*metricWithBuckets{}
	} else {
		for _, v := range enh.signatureToMetricWithBuckets {
			v.buckets = v.buckets[:0]
		}
	}
	for _, el := range inVec {
		upperBound, err := strconv.ParseFloat(
			el.Metric.Get(model.BucketLabel), 64,
		)
		if err != nil {
			// Oops, no bucket label or malformed label value. Skip.
			// TODO(beorn7): Issue a warning somehow.
			continue
		}
		hash := sigf(el.Metric)

		mb, ok := enh.signatureToMetricWithBuckets[hash]
		if !ok {
			el.Metric = labels.NewBuilder(el.Metric).
				Del(labels.BucketLabel, labels.MetricName).
				Labels()

			mb = &metricWithBuckets{el.Metric, nil}
			enh.signatureToMetricWithBuckets[hash] = mb
		}
		mb.buckets = append(mb.buckets, bucket{upperBound, el.V})
	}

	for _, mb := range enh.signatureToMetricWithBuckets {
		if len(mb.buckets) > 0 {
			enh.out = append(enh.out, Sample{
				Metric: mb.metric,
				Point:  Point{V: bucketQuantile(q, mb.buckets)},
			})
		}
	}

	return enh.out
}

// === resets(Matrix ValueTypeMatrix) Vector ===
func funcResets(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	in := vals[0].(Matrix)

	for _, samples := range in {
		resets := 0
		prev := samples.Points[0].V
		for _, sample := range samples.Points[1:] {
			current := sample.V
			if current < prev {
				resets++
			}
			prev = current
		}

		enh.out = append(enh.out, Sample{
			Point: Point{V: float64(resets)},
		})
	}
	return enh.out
}

// === changes(Matrix ValueTypeMatrix) Vector ===
func funcChanges(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	in := vals[0].(Matrix)

	for _, samples := range in {
		changes := 0
		prev := samples.Points[0].V
		for _, sample := range samples.Points[1:] {
			current := sample.V
			if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) {
				changes++
			}
			prev = current
		}

		enh.out = append(enh.out, Sample{
			Point: Point{V: float64(changes)},
		})
	}
	return enh.out
}
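// Illustrative examples (not part of the original source): for a series whose
// samples in the range are 1, 2, 3, 1, 2, 0, 4, resets() reports 2 (the value
// dropped twice) and changes() reports 6 (every adjacent pair differs). A
// typical query for the bucket handling above is
//
//	histogram_quantile(0.9, sum by (le) (rate(http_request_duration_seconds_bucket[5m])))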

// === label_replace(Vector ValueTypeVector, dst_label, replacement, src_labelname, regex ValueTypeString) Vector ===
func funcLabelReplace(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	var (
		vector   = vals[0].(Vector)
		dst      = args[1].(*StringLiteral).Val
		repl     = args[2].(*StringLiteral).Val
		src      = args[3].(*StringLiteral).Val
		regexStr = args[4].(*StringLiteral).Val
	)

	if enh.regex == nil {
		var err error
		enh.regex, err = regexp.Compile("^(?:" + regexStr + ")$")
		if err != nil {
			panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
		}
		if !model.LabelNameRE.MatchString(dst) {
			panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
		}
		enh.dmn = make(map[uint64]labels.Labels, len(enh.out))
	}

	outSet := make(map[uint64]struct{}, len(vector))
	for _, el := range vector {
		h := el.Metric.Hash()
		var outMetric labels.Labels
		if l, ok := enh.dmn[h]; ok {
			outMetric = l
		} else {
			srcVal := el.Metric.Get(src)
			indexes := enh.regex.FindStringSubmatchIndex(srcVal)
			if indexes == nil {
				// If there is no match, no replacement should take place.
				outMetric = el.Metric
				enh.dmn[h] = outMetric
			} else {
				res := enh.regex.ExpandString([]byte{}, repl, srcVal, indexes)

				lb := labels.NewBuilder(el.Metric).Del(dst)
				if len(res) > 0 {
					lb.Set(dst, string(res))
				}
				outMetric = lb.Labels()
				enh.dmn[h] = outMetric
			}
		}

		outHash := outMetric.Hash()
		if _, ok := outSet[outHash]; ok {
			panic(fmt.Errorf("duplicated label set in output of label_replace(): %s", el.Metric))
		} else {
			enh.out = append(enh.out,
				Sample{
					Metric: outMetric,
					Point:  Point{V: el.Point.V},
				})
			outSet[outHash] = struct{}{}
		}
	}
	return enh.out
}

// === Vector(s Scalar) Vector ===
func funcVector(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return append(enh.out,
		Sample{
			Metric: labels.Labels{},
			Point:  Point{V: vals[0].(Vector)[0].V},
		})
}

// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) Vector ===
func funcLabelJoin(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	var (
		vector    = vals[0].(Vector)
		dst       = args[1].(*StringLiteral).Val
		sep       = args[2].(*StringLiteral).Val
		srcLabels = make([]string, len(args)-3)
	)

	if enh.dmn == nil {
		enh.dmn = make(map[uint64]labels.Labels, len(enh.out))
	}

	for i := 3; i < len(args); i++ {
		src := args[i].(*StringLiteral).Val
		if !model.LabelName(src).IsValid() {
			panic(fmt.Errorf("invalid source label name in label_join(): %s", src))
		}
		srcLabels[i-3] = src
	}

	if !model.LabelName(dst).IsValid() {
		panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
	}

	outSet := make(map[uint64]struct{}, len(vector))
	srcVals := make([]string, len(srcLabels))
	for _, el := range vector {
		h := el.Metric.Hash()
		var outMetric labels.Labels
		if l, ok := enh.dmn[h]; ok {
			outMetric = l
		} else {

			for i, src := range srcLabels {
				srcVals[i] = el.Metric.Get(src)
			}

			lb := labels.NewBuilder(el.Metric)

			strval := strings.Join(srcVals, sep)
			if strval == "" {
				lb.Del(dst)
			} else {
				lb.Set(dst, strval)
			}

			outMetric = lb.Labels()
			enh.dmn[h] = outMetric
		}
		outHash := outMetric.Hash()

		if _, exists := outSet[outHash]; exists {
			panic(fmt.Errorf("duplicated label set in output of label_join(): %s", el.Metric))
		} else {
			enh.out = append(enh.out, Sample{
				Metric: outMetric,
				Point:  Point{V: el.Point.V},
			})
			outSet[outHash] = struct{}{}
		}
	}
	return enh.out
}
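// Illustrative usage (not part of the original source; the metric and labels
// are hypothetical):
//
//	label_replace(up{service="a:c"}, "foo", "$1", "service", "(.*):.*")
//
// adds foo="a" to each matching series, while
//
//	label_join(up{src1="a", src2="b"}, "foo", ",", "src1", "src2")
//
// sets foo="a,b" by joining the source label values with the separator.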

// Common code for date related functions.
func dateWrapper(vals []Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector {
	if len(vals) == 0 {
		return append(enh.out,
			Sample{
				Metric: labels.Labels{},
				Point:  Point{V: f(time.Unix(enh.ts/1000, 0).UTC())},
			})
	}

	for _, el := range vals[0].(Vector) {
		t := time.Unix(int64(el.V), 0).UTC()
		enh.out = append(enh.out, Sample{
			Metric: enh.dropMetricName(el.Metric),
			Point:  Point{V: f(t)},
		})
	}
	return enh.out
}

// === days_in_month(v Vector) Scalar ===
func funcDaysInMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return dateWrapper(vals, enh, func(t time.Time) float64 {
		return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day())
	})
}

// === day_of_month(v Vector) Scalar ===
func funcDayOfMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return dateWrapper(vals, enh, func(t time.Time) float64 {
		return float64(t.Day())
	})
}

// === day_of_week(v Vector) Scalar ===
func funcDayOfWeek(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return dateWrapper(vals, enh, func(t time.Time) float64 {
		return float64(t.Weekday())
	})
}

// === hour(v Vector) Scalar ===
func funcHour(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return dateWrapper(vals, enh, func(t time.Time) float64 {
		return float64(t.Hour())
	})
}

// === minute(v Vector) Scalar ===
func funcMinute(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return dateWrapper(vals, enh, func(t time.Time) float64 {
		return float64(t.Minute())
	})
}

// === month(v Vector) Scalar ===
func funcMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return dateWrapper(vals, enh, func(t time.Time) float64 {
		return float64(t.Month())
	})
}

// === year(v Vector) Scalar ===
func funcYear(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
	return dateWrapper(vals, enh, func(t time.Time) float64 {
		return float64(t.Year())
	})
}
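// Illustrative examples (not part of the original source): these functions
// interpret sample values as Unix timestamps in UTC, so day_of_week(vector(0))
// is 4 (1970-01-01 was a Thursday) and days_in_month(vector(0)) is 31. Called
// with no argument, e.g. minute(), they use the evaluation timestamp instead.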

var functions = map[string]*Function{
	"abs": {
		Name:       "abs",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcAbs,
	},
	"absent": {
		Name:       "absent",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcAbsent,
	},
	"avg_over_time": {
		Name:       "avg_over_time",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcAvgOverTime,
	},
	"ceil": {
		Name:       "ceil",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcCeil,
	},
	"changes": {
		Name:       "changes",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcChanges,
	},
	"clamp_max": {
		Name:       "clamp_max",
		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeScalar},
		ReturnType: ValueTypeVector,
		Call:       funcClampMax,
	},
	"clamp_min": {
		Name:       "clamp_min",
		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeScalar},
		ReturnType: ValueTypeVector,
		Call:       funcClampMin,
	},
	"count_over_time": {
		Name:       "count_over_time",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcCountOverTime,
	},
	"days_in_month": {
		Name:       "days_in_month",
		ArgTypes:   []ValueType{ValueTypeVector},
		Variadic:   1,
		ReturnType: ValueTypeVector,
		Call:       funcDaysInMonth,
	},
	"day_of_month": {
		Name:       "day_of_month",
		ArgTypes:   []ValueType{ValueTypeVector},
		Variadic:   1,
		ReturnType: ValueTypeVector,
		Call:       funcDayOfMonth,
	},
	"day_of_week": {
		Name:       "day_of_week",
		ArgTypes:   []ValueType{ValueTypeVector},
		Variadic:   1,
		ReturnType: ValueTypeVector,
		Call:       funcDayOfWeek,
	},
	"delta": {
		Name:       "delta",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcDelta,
	},
	"deriv": {
		Name:       "deriv",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcDeriv,
	},
	"exp": {
		Name:       "exp",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcExp,
	},
	"floor": {
		Name:       "floor",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcFloor,
	},
	"histogram_quantile": {
		Name:       "histogram_quantile",
		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcHistogramQuantile,
	},
	"holt_winters": {
		Name:       "holt_winters",
		ArgTypes:   []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
		ReturnType: ValueTypeVector,
		Call:       funcHoltWinters,
	},
	"hour": {
		Name:       "hour",
		ArgTypes:   []ValueType{ValueTypeVector},
		Variadic:   1,
		ReturnType: ValueTypeVector,
		Call:       funcHour,
	},
	"idelta": {
		Name:       "idelta",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcIdelta,
	},
	"increase": {
		Name:       "increase",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcIncrease,
	},
	"irate": {
		Name:       "irate",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcIrate,
	},
	"label_replace": {
		Name:       "label_replace",
		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString, ValueTypeString},
		ReturnType: ValueTypeVector,
		Call:       funcLabelReplace,
	},
	"label_join": {
		Name:       "label_join",
		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString},
		Variadic:   -1,
		ReturnType: ValueTypeVector,
		Call:       funcLabelJoin,
	},
	"ln": {
		Name:       "ln",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcLn,
	},
	"log10": {
		Name:       "log10",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcLog10,
	},
	"log2": {
		Name:       "log2",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcLog2,
	},
	"max_over_time": {
		Name:       "max_over_time",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcMaxOverTime,
	},
	"min_over_time": {
		Name:       "min_over_time",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcMinOverTime,
	},
	"minute": {
		Name:       "minute",
		ArgTypes:   []ValueType{ValueTypeVector},
		Variadic:   1,
		ReturnType: ValueTypeVector,
		Call:       funcMinute,
	},
	"month": {
		Name:       "month",
		ArgTypes:   []ValueType{ValueTypeVector},
		Variadic:   1,
		ReturnType: ValueTypeVector,
		Call:       funcMonth,
	},
	"predict_linear": {
		Name:       "predict_linear",
		ArgTypes:   []ValueType{ValueTypeMatrix, ValueTypeScalar},
		ReturnType: ValueTypeVector,
		Call:       funcPredictLinear,
	},
	"quantile_over_time": {
		Name:       "quantile_over_time",
		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcQuantileOverTime,
	},
	"rate": {
		Name:       "rate",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcRate,
	},
	"resets": {
		Name:       "resets",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcResets,
	},
	"round": {
		Name:       "round",
		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeScalar},
		Variadic:   1,
		ReturnType: ValueTypeVector,
		Call:       funcRound,
	},
	"scalar": {
		Name:       "scalar",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeScalar,
		Call:       funcScalar,
	},
	"sort": {
		Name:       "sort",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcSort,
	},
	"sort_desc": {
		Name:       "sort_desc",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcSortDesc,
	},
	"sqrt": {
		Name:       "sqrt",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcSqrt,
	},
	"stddev_over_time": {
		Name:       "stddev_over_time",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcStddevOverTime,
	},
	"stdvar_over_time": {
		Name:       "stdvar_over_time",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcStdvarOverTime,
	},
	"sum_over_time": {
		Name:       "sum_over_time",
		ArgTypes:   []ValueType{ValueTypeMatrix},
		ReturnType: ValueTypeVector,
		Call:       funcSumOverTime,
	},
	"time": {
		Name:       "time",
		ArgTypes:   []ValueType{},
		ReturnType: ValueTypeScalar,
		Call:       funcTime,
	},
	"timestamp": {
		Name:       "timestamp",
		ArgTypes:   []ValueType{ValueTypeVector},
		ReturnType: ValueTypeVector,
		Call:       funcTimestamp,
	},
	"vector": {
		Name:       "vector",
		ArgTypes:   []ValueType{ValueTypeScalar},
		ReturnType: ValueTypeVector,
		Call:       funcVector,
	},
	"year": {
		Name:       "year",
		ArgTypes:   []ValueType{ValueTypeVector},
		Variadic:   1,
		ReturnType: ValueTypeVector,
		Call:       funcYear,
	},
}
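// Illustrative lookup (not part of the original source): callers resolve a
// function by name via getFunction below and invoke it through its Call field,
// roughly:
//
//	if fn, ok := getFunction("rate"); ok {
//		result := fn.Call(vals, args, enh)
//		_ = result
//	}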

// getFunction returns a predefined Function object for the given name.
func getFunction(name string) (*Function, bool) {
	function, ok := functions[name]
	return function, ok
}

type vectorByValueHeap Vector

func (s vectorByValueHeap) Len() int {
	return len(s)
}

func (s vectorByValueHeap) Less(i, j int) bool {
	if math.IsNaN(s[i].V) {
		return true
	}
	return s[i].V < s[j].V
}

func (s vectorByValueHeap) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s *vectorByValueHeap) Push(x interface{}) {
	*s = append(*s, *(x.(*Sample)))
}

func (s *vectorByValueHeap) Pop() interface{} {
	old := *s
	n := len(old)
	el := old[n-1]
	*s = old[0 : n-1]
	return el
}

type vectorByReverseValueHeap Vector

func (s vectorByReverseValueHeap) Len() int {
	return len(s)
}

func (s vectorByReverseValueHeap) Less(i, j int) bool {
	if math.IsNaN(s[i].V) {
		return true
	}
	return s[i].V > s[j].V
}

func (s vectorByReverseValueHeap) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s *vectorByReverseValueHeap) Push(x interface{}) {
	*s = append(*s, *(x.(*Sample)))
}

func (s *vectorByReverseValueHeap) Pop() interface{} {
	old := *s
	n := len(old)
	el := old[n-1]
	*s = old[0 : n-1]
	return el
}
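// Note added for illustration (not part of the original source): both heap
// types satisfy sort.Interface, which is how funcSort and funcSortDesc use
// them above, and their Push/Pop methods mean the pointer types also satisfy
// container/heap's heap.Interface. Both Less implementations order a NaN at
// index i first, which, combined with sort.Reverse above, sends NaN samples
// to the bottom of sort() and sort_desc() output.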