github.com/influxdata/influxdb/v2@v2.7.6/influxql/query/functions.go

package query

import (
	"container/heap"
	"encoding/base64"
	"fmt"
	"math"
	"sort"
	"time"

	"github.com/influxdata/influxdb/v2/influxql/query/internal/gota"
	"github.com/influxdata/influxdb/v2/influxql/query/neldermead"
	"github.com/influxdata/influxdb/v2/pkg/estimator/hll"
	"github.com/influxdata/influxql"
)

var hllPrefix = []byte("HLL_")
var hllErrorPrefix = []byte("HLLERROR ")

// queryFieldMapper is a FieldMapper that wraps another FieldMapper and exposes
// the functions implemented by the query engine.
type queryFieldMapper struct {
	influxql.FieldMapper
}

func (m queryFieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) {
	if mapper, ok := m.FieldMapper.(influxql.CallTypeMapper); ok {
		typ, err := mapper.CallType(name, args)
		if err != nil {
			return influxql.Unknown, err
		} else if typ != influxql.Unknown {
			return typ, nil
		}
	}

	// Use the default FunctionTypeMapper for the query engine.
	typmap := FunctionTypeMapper{}
	return typmap.CallType(name, args)
}

// CallTypeMapper returns the types for call iterator functions.
// Call iterator functions are commonly implemented within the storage engine,
// so this mapper is limited to the return values of those functions.
type CallTypeMapper struct{}

func (CallTypeMapper) MapType(measurement *influxql.Measurement, field string) influxql.DataType {
	return influxql.Unknown
}

func (CallTypeMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) {
	// If the function is not implemented by the embedded field mapper, then
	// see if we implement the function and return the type here.
	switch name {
	case "mean":
		return influxql.Float, nil
	case "count":
		return influxql.Integer, nil
	case "min", "max", "sum", "first", "last":
		// TODO(jsternberg): Verify the input type.
		return args[0], nil
	}
	return influxql.Unknown, nil
}

// FunctionTypeMapper handles the type mapping for all functions implemented by the
// query engine.
type FunctionTypeMapper struct {
	CallTypeMapper
}

func (FunctionTypeMapper) MapType(measurement *influxql.Measurement, field string) influxql.DataType {
	return influxql.Unknown
}

func (m FunctionTypeMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) {
	if typ, err := m.CallTypeMapper.CallType(name, args); typ != influxql.Unknown || err != nil {
		return typ, err
	}

	// Handle functions implemented by the query engine.
	switch name {
	case "median", "integral", "stddev",
		"derivative", "non_negative_derivative",
		"moving_average",
		"exponential_moving_average",
		"double_exponential_moving_average",
		"triple_exponential_moving_average",
		"relative_strength_index",
		"triple_exponential_derivative",
		"kaufmans_efficiency_ratio",
		"kaufmans_adaptive_moving_average",
		"chande_momentum_oscillator",
		"holt_winters", "holt_winters_with_fit":
		return influxql.Float, nil
	case "elapsed":
		return influxql.Integer, nil
	default:
		// TODO(jsternberg): Do not use default for this.
		return args[0], nil
	}
}

// FloatMeanReducer calculates the mean of the aggregated points.
type FloatMeanReducer struct {
	sum   float64
	count uint32
}

// NewFloatMeanReducer creates a new FloatMeanReducer.
func NewFloatMeanReducer() *FloatMeanReducer {
	return &FloatMeanReducer{}
}

// AggregateFloat aggregates a point into the reducer.
func (r *FloatMeanReducer) AggregateFloat(p *FloatPoint) {
	if p.Aggregated >= 2 {
		r.sum += p.Value * float64(p.Aggregated)
		r.count += p.Aggregated
	} else {
		r.sum += p.Value
		r.count++
	}
}

// Emit emits the mean of the aggregated points as a single point.
func (r *FloatMeanReducer) Emit() []FloatPoint {
	return []FloatPoint{{
		Time:       ZeroTime,
		Value:      r.sum / float64(r.count),
		Aggregated: r.count,
	}}
}
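
// A minimal usage sketch (not from the original source; values invented): the
// Aggregated field lets one point stand in for several already-combined
// points, and AggregateFloat weights the sum accordingly.
//
//	r := NewFloatMeanReducer()
//	r.AggregateFloat(&FloatPoint{Time: 1, Value: 2})                // sum=2, count=1
//	r.AggregateFloat(&FloatPoint{Time: 2, Value: 4, Aggregated: 3}) // sum=2+4*3=14, count=4
//	pts := r.Emit()                                                 // Value: 14/4 = 3.5, Aggregated: 4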

// IntegerMeanReducer calculates the mean of the aggregated points.
type IntegerMeanReducer struct {
	sum   int64
	count uint32
}

// NewIntegerMeanReducer creates a new IntegerMeanReducer.
func NewIntegerMeanReducer() *IntegerMeanReducer {
	return &IntegerMeanReducer{}
}

// AggregateInteger aggregates a point into the reducer.
func (r *IntegerMeanReducer) AggregateInteger(p *IntegerPoint) {
	if p.Aggregated >= 2 {
		r.sum += p.Value * int64(p.Aggregated)
		r.count += p.Aggregated
	} else {
		r.sum += p.Value
		r.count++
	}
}

// Emit emits the mean of the aggregated points as a single point.
func (r *IntegerMeanReducer) Emit() []FloatPoint {
	return []FloatPoint{{
		Time:       ZeroTime,
		Value:      float64(r.sum) / float64(r.count),
		Aggregated: r.count,
	}}
}

// UnsignedMeanReducer calculates the mean of the aggregated points.
type UnsignedMeanReducer struct {
	sum   uint64
	count uint32
}

// NewUnsignedMeanReducer creates a new UnsignedMeanReducer.
func NewUnsignedMeanReducer() *UnsignedMeanReducer {
	return &UnsignedMeanReducer{}
}

// AggregateUnsigned aggregates a point into the reducer.
func (r *UnsignedMeanReducer) AggregateUnsigned(p *UnsignedPoint) {
	if p.Aggregated >= 2 {
		r.sum += p.Value * uint64(p.Aggregated)
		r.count += p.Aggregated
	} else {
		r.sum += p.Value
		r.count++
	}
}

// Emit emits the mean of the aggregated points as a single point.
func (r *UnsignedMeanReducer) Emit() []FloatPoint {
	return []FloatPoint{{
		Time:       ZeroTime,
		Value:      float64(r.sum) / float64(r.count),
		Aggregated: r.count,
	}}
}

// FloatSpreadReducer calculates the spread (max - min) of the aggregated points.
type FloatSpreadReducer struct {
	min, max float64
	count    uint32
}

func NewFloatSpreadReducer() *FloatSpreadReducer {
	return &FloatSpreadReducer{
		min: math.Inf(1),
		max: math.Inf(-1),
	}
}

func (r *FloatSpreadReducer) AggregateFloat(p *FloatPoint) {
	r.min = math.Min(r.min, p.Value)
	r.max = math.Max(r.max, p.Value)
	r.count++
}

func (r *FloatSpreadReducer) Emit() []FloatPoint {
	return []FloatPoint{{
		Time:       ZeroTime,
		Value:      r.max - r.min,
		Aggregated: r.count,
	}}
}

// IntegerSpreadReducer calculates the spread (max - min) of the aggregated points.
type IntegerSpreadReducer struct {
	min, max int64
	count    uint32
}

func NewIntegerSpreadReducer() *IntegerSpreadReducer {
	return &IntegerSpreadReducer{
		min: math.MaxInt64,
		max: math.MinInt64,
	}
}

func (r *IntegerSpreadReducer) AggregateInteger(p *IntegerPoint) {
	if p.Value < r.min {
		r.min = p.Value
	}
	if p.Value > r.max {
		r.max = p.Value
	}
	r.count++
}

func (r *IntegerSpreadReducer) Emit() []IntegerPoint {
	return []IntegerPoint{{
		Time:       ZeroTime,
		Value:      r.max - r.min,
		Aggregated: r.count,
	}}
}

// UnsignedSpreadReducer calculates the spread (max - min) of the aggregated points.
type UnsignedSpreadReducer struct {
	min, max uint64
	count    uint32
}

func NewUnsignedSpreadReducer() *UnsignedSpreadReducer {
	return &UnsignedSpreadReducer{
		min: math.MaxUint64,
		max: 0,
	}
}

func (r *UnsignedSpreadReducer) AggregateUnsigned(p *UnsignedPoint) {
	if p.Value < r.min {
		r.min = p.Value
	}
	if p.Value > r.max {
		r.max = p.Value
	}
	r.count++
}

func (r *UnsignedSpreadReducer) Emit() []UnsignedPoint {
	return []UnsignedPoint{{
		Time:       ZeroTime,
		Value:      r.max - r.min,
		Aggregated: r.count,
	}}
}
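
// Usage sketch for the spread reducers (values invented): spread is max-min
// over the aggregated points. The float variant starts min at +Inf and max at
// -Inf so that the first point initializes both bounds.
//
//	r := NewFloatSpreadReducer()
//	for _, v := range []float64{3, -1, 7} {
//		r.AggregateFloat(&FloatPoint{Value: v})
//	}
//	pts := r.Emit() // Value: 7 - (-1) = 8, Aggregated: 3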

// FloatDerivativeReducer calculates the derivative of the aggregated points.
type FloatDerivativeReducer struct {
	interval      Interval
	prev          FloatPoint
	curr          FloatPoint
	isNonNegative bool
	ascending     bool
}

// NewFloatDerivativeReducer creates a new FloatDerivativeReducer.
func NewFloatDerivativeReducer(interval Interval, isNonNegative, ascending bool) *FloatDerivativeReducer {
	return &FloatDerivativeReducer{
		interval:      interval,
		isNonNegative: isNonNegative,
		ascending:     ascending,
		prev:          FloatPoint{Nil: true},
		curr:          FloatPoint{Nil: true},
	}
}

// AggregateFloat aggregates a point into the reducer and updates the current window.
func (r *FloatDerivativeReducer) AggregateFloat(p *FloatPoint) {
	// Skip past a point when it does not advance the stream. A joined series
	// may have multiple points at the same time so we will discard anything
	// except the first point we encounter.
	if !r.curr.Nil && r.curr.Time == p.Time {
		return
	}

	r.prev = r.curr
	r.curr = *p
}

// Emit emits the derivative of the reducer at the current point.
func (r *FloatDerivativeReducer) Emit() []FloatPoint {
	if r.prev.Nil {
		return nil
	}
	// Calculate the derivative of successive points by dividing the
	// difference of each value by the elapsed time normalized to the interval.
	diff := r.curr.Value - r.prev.Value
	elapsed := r.curr.Time - r.prev.Time
	if !r.ascending {
		elapsed = -elapsed
	}
	value := diff / (float64(elapsed) / float64(r.interval.Duration))

	// Mark this point as read by changing the previous point to nil.
	r.prev.Nil = true

	// Drop negative values for non-negative derivatives.
	if r.isNonNegative && diff < 0 {
		return nil
	}
	return []FloatPoint{{Time: r.curr.Time, Value: value}}
}
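
// Worked example (not from the original source): with an interval of 10s and
// two aggregated points one minute apart, with values 10 then 40:
//
//	diff    = 40 - 10 = 30
//	elapsed = 60s
//	value   = 30 / (60s / 10s) = 5
//
// i.e. the series rose by 5 per 10-second interval. For descending iterators
// (ascending == false) elapsed is negated so the sign of the result is
// preserved.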

// IntegerDerivativeReducer calculates the derivative of the aggregated points.
type IntegerDerivativeReducer struct {
	interval      Interval
	prev          IntegerPoint
	curr          IntegerPoint
	isNonNegative bool
	ascending     bool
}

// NewIntegerDerivativeReducer creates a new IntegerDerivativeReducer.
func NewIntegerDerivativeReducer(interval Interval, isNonNegative, ascending bool) *IntegerDerivativeReducer {
	return &IntegerDerivativeReducer{
		interval:      interval,
		isNonNegative: isNonNegative,
		ascending:     ascending,
		prev:          IntegerPoint{Nil: true},
		curr:          IntegerPoint{Nil: true},
	}
}

// AggregateInteger aggregates a point into the reducer and updates the current window.
func (r *IntegerDerivativeReducer) AggregateInteger(p *IntegerPoint) {
	// Skip past a point when it does not advance the stream. A joined series
	// may have multiple points at the same time so we will discard anything
	// except the first point we encounter.
	if !r.curr.Nil && r.curr.Time == p.Time {
		return
	}

	r.prev = r.curr
	r.curr = *p
}

// Emit emits the derivative of the reducer at the current point.
func (r *IntegerDerivativeReducer) Emit() []FloatPoint {
	if r.prev.Nil {
		return nil
	}

	// Calculate the derivative of successive points by dividing the
	// difference of each value by the elapsed time normalized to the interval.
	diff := float64(r.curr.Value - r.prev.Value)
	elapsed := r.curr.Time - r.prev.Time
	if !r.ascending {
		elapsed = -elapsed
	}
	value := diff / (float64(elapsed) / float64(r.interval.Duration))

	// Mark this point as read by changing the previous point to nil.
	r.prev.Nil = true

	// Drop negative values for non-negative derivatives.
	if r.isNonNegative && diff < 0 {
		return nil
	}
	return []FloatPoint{{Time: r.curr.Time, Value: value}}
}

// UnsignedDerivativeReducer calculates the derivative of the aggregated points.
type UnsignedDerivativeReducer struct {
	interval      Interval
	prev          UnsignedPoint
	curr          UnsignedPoint
	isNonNegative bool
	ascending     bool
}

// NewUnsignedDerivativeReducer creates a new UnsignedDerivativeReducer.
func NewUnsignedDerivativeReducer(interval Interval, isNonNegative, ascending bool) *UnsignedDerivativeReducer {
	return &UnsignedDerivativeReducer{
		interval:      interval,
		isNonNegative: isNonNegative,
		ascending:     ascending,
		prev:          UnsignedPoint{Nil: true},
		curr:          UnsignedPoint{Nil: true},
	}
}

// AggregateUnsigned aggregates a point into the reducer and updates the current window.
func (r *UnsignedDerivativeReducer) AggregateUnsigned(p *UnsignedPoint) {
	// Skip past a point when it does not advance the stream. A joined series
	// may have multiple points at the same time so we will discard anything
	// except the first point we encounter.
	if !r.curr.Nil && r.curr.Time == p.Time {
		return
	}

	r.prev = r.curr
	r.curr = *p
}

// Emit emits the derivative of the reducer at the current point.
func (r *UnsignedDerivativeReducer) Emit() []FloatPoint {
	if r.prev.Nil {
		return nil
	}
	// Calculate the derivative of successive points by dividing the
	// difference of each value by the elapsed time normalized to the interval.
	var diff float64
	if r.curr.Value > r.prev.Value {
		diff = float64(r.curr.Value - r.prev.Value)
	} else {
		diff = -float64(r.prev.Value - r.curr.Value)
	}
	elapsed := r.curr.Time - r.prev.Time
	if !r.ascending {
		elapsed = -elapsed
	}
	value := diff / (float64(elapsed) / float64(r.interval.Duration))

	// Mark this point as read by changing the previous point to nil.
	r.prev.Nil = true

	// Drop negative values for non-negative derivatives.
	if r.isNonNegative && diff < 0 {
		return nil
	}
	return []FloatPoint{{Time: r.curr.Time, Value: value}}
}
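
// A note on the unsigned variant above: subtracting the larger uint64 from
// the smaller would wrap around, so the branch performs the subtraction in
// whichever direction is non-negative and applies the sign afterwards. Here
// curr and prev stand in for r.curr.Value and r.prev.Value:
//
//	var diff float64
//	if curr > prev {
//		diff = float64(curr - prev)
//	} else {
//		diff = -float64(prev - curr)
//	}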

// FloatDifferenceReducer calculates the difference of the aggregated points.
type FloatDifferenceReducer struct {
	isNonNegative bool
	prev          FloatPoint
	curr          FloatPoint
}

// NewFloatDifferenceReducer creates a new FloatDifferenceReducer.
func NewFloatDifferenceReducer(isNonNegative bool) *FloatDifferenceReducer {
	return &FloatDifferenceReducer{
		isNonNegative: isNonNegative,
		prev:          FloatPoint{Nil: true},
		curr:          FloatPoint{Nil: true},
	}
}

// AggregateFloat aggregates a point into the reducer and updates the current window.
func (r *FloatDifferenceReducer) AggregateFloat(p *FloatPoint) {
	// Skip past a point when it does not advance the stream. A joined series
	// may have multiple points at the same time so we will discard anything
	// except the first point we encounter.
	if !r.curr.Nil && r.curr.Time == p.Time {
		return
	}

	r.prev = r.curr
	r.curr = *p
}

// Emit emits the difference of the reducer at the current point.
func (r *FloatDifferenceReducer) Emit() []FloatPoint {
	if r.prev.Nil {
		return nil
	}

	// Calculate the difference of successive points.
	value := r.curr.Value - r.prev.Value

	// If this is a non_negative_difference, discard any negative value. Since
	// prev has not yet been marked as read, correctness is preserved.
	if r.isNonNegative && value < 0 {
		return nil
	}

	// Mark this point as read by changing the previous point to nil.
	r.prev.Nil = true
	return []FloatPoint{{Time: r.curr.Time, Value: value}}
}
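
// Behavior sketch for non_negative_difference (values invented): a negative
// result is discarded before prev is marked as read, so calling Emit again
// without new input keeps returning no points rather than fabricating one.
//
//	r := NewFloatDifferenceReducer(true)
//	r.AggregateFloat(&FloatPoint{Time: 1, Value: 5})
//	r.AggregateFloat(&FloatPoint{Time: 2, Value: 3})
//	pts := r.Emit() // nil: 3-5 is negative, so the pair is discarded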

// IntegerDifferenceReducer calculates the difference of the aggregated points.
type IntegerDifferenceReducer struct {
	isNonNegative bool
	prev          IntegerPoint
	curr          IntegerPoint
}

// NewIntegerDifferenceReducer creates a new IntegerDifferenceReducer.
func NewIntegerDifferenceReducer(isNonNegative bool) *IntegerDifferenceReducer {
	return &IntegerDifferenceReducer{
		isNonNegative: isNonNegative,
		prev:          IntegerPoint{Nil: true},
		curr:          IntegerPoint{Nil: true},
	}
}

// AggregateInteger aggregates a point into the reducer and updates the current window.
func (r *IntegerDifferenceReducer) AggregateInteger(p *IntegerPoint) {
	// Skip past a point when it does not advance the stream. A joined series
	// may have multiple points at the same time so we will discard anything
	// except the first point we encounter.
	if !r.curr.Nil && r.curr.Time == p.Time {
		return
	}

	r.prev = r.curr
	r.curr = *p
}

// Emit emits the difference of the reducer at the current point.
func (r *IntegerDifferenceReducer) Emit() []IntegerPoint {
	if r.prev.Nil {
		return nil
	}

	// Calculate the difference of successive points.
	value := r.curr.Value - r.prev.Value

	// If this is a non_negative_difference, discard any negative value. Since
	// prev has not yet been marked as read, correctness is preserved.
	if r.isNonNegative && value < 0 {
		return nil
	}

	// Mark this point as read by changing the previous point to nil.
	r.prev.Nil = true

	return []IntegerPoint{{Time: r.curr.Time, Value: value}}
}

// UnsignedDifferenceReducer calculates the difference of the aggregated points.
type UnsignedDifferenceReducer struct {
	isNonNegative bool
	prev          UnsignedPoint
	curr          UnsignedPoint
}

// NewUnsignedDifferenceReducer creates a new UnsignedDifferenceReducer.
func NewUnsignedDifferenceReducer(isNonNegative bool) *UnsignedDifferenceReducer {
	return &UnsignedDifferenceReducer{
		isNonNegative: isNonNegative,
		prev:          UnsignedPoint{Nil: true},
		curr:          UnsignedPoint{Nil: true},
	}
}

// AggregateUnsigned aggregates a point into the reducer and updates the current window.
func (r *UnsignedDifferenceReducer) AggregateUnsigned(p *UnsignedPoint) {
	// Skip past a point when it does not advance the stream. A joined series
	// may have multiple points at the same time so we will discard anything
	// except the first point we encounter.
	if !r.curr.Nil && r.curr.Time == p.Time {
		return
	}

	r.prev = r.curr
	r.curr = *p
}

// Emit emits the difference of the reducer at the current point.
func (r *UnsignedDifferenceReducer) Emit() []UnsignedPoint {
	if r.prev.Nil {
		return nil
	}

	// If this is a non_negative_difference, discard any value that would be
	// negative. Since prev has not yet been marked as read, correctness is
	// preserved; the check also avoids unsigned underflow below.
	if r.isNonNegative && r.curr.Value < r.prev.Value {
		return nil
	}

	// Calculate the difference of successive points.
	value := r.curr.Value - r.prev.Value

	// Mark this point as read by changing the previous point to nil.
	r.prev.Nil = true

	return []UnsignedPoint{{Time: r.curr.Time, Value: value}}
}

// FloatMovingAverageReducer calculates the moving average of the aggregated points.
type FloatMovingAverageReducer struct {
	pos  int
	sum  float64
	time int64
	buf  []float64
}

// NewFloatMovingAverageReducer creates a new FloatMovingAverageReducer.
func NewFloatMovingAverageReducer(n int) *FloatMovingAverageReducer {
	return &FloatMovingAverageReducer{
		buf: make([]float64, 0, n),
	}
}

// AggregateFloat aggregates a point into the reducer and updates the current window.
func (r *FloatMovingAverageReducer) AggregateFloat(p *FloatPoint) {
	if len(r.buf) != cap(r.buf) {
		r.buf = append(r.buf, p.Value)
	} else {
		r.sum -= r.buf[r.pos]
		r.buf[r.pos] = p.Value
	}
	r.sum += p.Value
	r.time = p.Time
	r.pos++
	if r.pos >= cap(r.buf) {
		r.pos = 0
	}
}

// Emit emits the moving average of the current window. Emit should be called
// after every call to AggregateFloat and it will produce one point if there
// is enough data to fill a window, otherwise it will produce zero points.
func (r *FloatMovingAverageReducer) Emit() []FloatPoint {
	if len(r.buf) != cap(r.buf) {
		return []FloatPoint{}
	}
	return []FloatPoint{
		{
			Value:      r.sum / float64(len(r.buf)),
			Time:       r.time,
			Aggregated: uint32(len(r.buf)),
		},
	}
}
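
// Usage sketch (values invented): the reducer keeps a fixed-capacity ring
// buffer plus a running sum, so each update is O(1). Nothing is emitted until
// the buffer has filled a complete window.
//
//	r := NewFloatMovingAverageReducer(2)
//	r.AggregateFloat(&FloatPoint{Time: 1, Value: 1})
//	r.Emit() // empty: only 1 of 2 window slots filled
//	r.AggregateFloat(&FloatPoint{Time: 2, Value: 3})
//	r.Emit() // Value: (1+3)/2 = 2, Time: 2
//	r.AggregateFloat(&FloatPoint{Time: 3, Value: 5})
//	r.Emit() // Value: (3+5)/2 = 4, Time: 3 (the value 1 was evicted)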

// IntegerMovingAverageReducer calculates the moving average of the aggregated points.
type IntegerMovingAverageReducer struct {
	pos  int
	sum  int64
	time int64
	buf  []int64
}

// NewIntegerMovingAverageReducer creates a new IntegerMovingAverageReducer.
func NewIntegerMovingAverageReducer(n int) *IntegerMovingAverageReducer {
	return &IntegerMovingAverageReducer{
		buf: make([]int64, 0, n),
	}
}

// AggregateInteger aggregates a point into the reducer and updates the current window.
func (r *IntegerMovingAverageReducer) AggregateInteger(p *IntegerPoint) {
	if len(r.buf) != cap(r.buf) {
		r.buf = append(r.buf, p.Value)
	} else {
		r.sum -= r.buf[r.pos]
		r.buf[r.pos] = p.Value
	}
	r.sum += p.Value
	r.time = p.Time
	r.pos++
	if r.pos >= cap(r.buf) {
		r.pos = 0
	}
}

// Emit emits the moving average of the current window. Emit should be called
// after every call to AggregateInteger and it will produce one point if there
// is enough data to fill a window, otherwise it will produce zero points.
func (r *IntegerMovingAverageReducer) Emit() []FloatPoint {
	if len(r.buf) != cap(r.buf) {
		return []FloatPoint{}
	}
	return []FloatPoint{
		{
			Value:      float64(r.sum) / float64(len(r.buf)),
			Time:       r.time,
			Aggregated: uint32(len(r.buf)),
		},
	}
}

// UnsignedMovingAverageReducer calculates the moving average of the aggregated points.
type UnsignedMovingAverageReducer struct {
	pos  int
	sum  uint64
	time int64
	buf  []uint64
}

// NewUnsignedMovingAverageReducer creates a new UnsignedMovingAverageReducer.
func NewUnsignedMovingAverageReducer(n int) *UnsignedMovingAverageReducer {
	return &UnsignedMovingAverageReducer{
		buf: make([]uint64, 0, n),
	}
}

// AggregateUnsigned aggregates a point into the reducer and updates the current window.
func (r *UnsignedMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) {
	if len(r.buf) != cap(r.buf) {
		r.buf = append(r.buf, p.Value)
	} else {
		r.sum -= r.buf[r.pos]
		r.buf[r.pos] = p.Value
	}
	r.sum += p.Value
	r.time = p.Time
	r.pos++
	if r.pos >= cap(r.buf) {
		r.pos = 0
	}
}

// Emit emits the moving average of the current window. Emit should be called
// after every call to AggregateUnsigned and it will produce one point if there
// is enough data to fill a window, otherwise it will produce zero points.
func (r *UnsignedMovingAverageReducer) Emit() []FloatPoint {
	if len(r.buf) != cap(r.buf) {
		return []FloatPoint{}
	}
	return []FloatPoint{
		{
			Value:      float64(r.sum) / float64(len(r.buf)),
			Time:       r.time,
			Aggregated: uint32(len(r.buf)),
		},
	}
}

type ExponentialMovingAverageReducer struct {
	ema        gota.EMA
	holdPeriod uint32
	count      uint32
	v          float64
	t          int64
}

func NewExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *ExponentialMovingAverageReducer {
	ema := gota.NewEMA(period, warmupType)
	if holdPeriod == -1 {
		holdPeriod = ema.WarmCount()
	}
	return &ExponentialMovingAverageReducer{
		ema:        *ema,
		holdPeriod: uint32(holdPeriod),
	}
}

func (r *ExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Value, p.Time)
}

func (r *ExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *ExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *ExponentialMovingAverageReducer) aggregate(v float64, t int64) {
	r.v = r.ema.Add(v)
	r.t = t
	r.count++
}

func (r *ExponentialMovingAverageReducer) Emit() []FloatPoint {
	if r.count <= r.holdPeriod {
		return nil
	}

	return []FloatPoint{
		{
			Value:      r.v,
			Time:       r.t,
			Aggregated: r.count,
		},
	}
}

type DoubleExponentialMovingAverageReducer struct {
	dema       gota.DEMA
	holdPeriod uint32
	count      uint32
	v          float64
	t          int64
}

func NewDoubleExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *DoubleExponentialMovingAverageReducer {
	dema := gota.NewDEMA(period, warmupType)
	if holdPeriod == -1 {
		holdPeriod = dema.WarmCount()
	}
	return &DoubleExponentialMovingAverageReducer{
		dema:       *dema,
		holdPeriod: uint32(holdPeriod),
	}
}

func (r *DoubleExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Value, p.Time)
}

func (r *DoubleExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *DoubleExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *DoubleExponentialMovingAverageReducer) aggregate(v float64, t int64) {
	r.v = r.dema.Add(v)
	r.t = t
	r.count++
}

func (r *DoubleExponentialMovingAverageReducer) Emit() []FloatPoint {
	if r.count <= r.holdPeriod {
		return nil
	}

	return []FloatPoint{
		{
			Value:      r.v,
			Time:       r.t,
			Aggregated: r.count,
		},
	}
}

type TripleExponentialMovingAverageReducer struct {
	tema       gota.TEMA
	holdPeriod uint32
	count      uint32
	v          float64
	t          int64
}

func NewTripleExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *TripleExponentialMovingAverageReducer {
	tema := gota.NewTEMA(period, warmupType)
	if holdPeriod == -1 {
		holdPeriod = tema.WarmCount()
	}
	return &TripleExponentialMovingAverageReducer{
		tema:       *tema,
		holdPeriod: uint32(holdPeriod),
	}
}

func (r *TripleExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Value, p.Time)
}

func (r *TripleExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *TripleExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *TripleExponentialMovingAverageReducer) aggregate(v float64, t int64) {
	r.v = r.tema.Add(v)
	r.t = t
	r.count++
}

func (r *TripleExponentialMovingAverageReducer) Emit() []FloatPoint {
	if r.count <= r.holdPeriod {
		return nil
	}

	return []FloatPoint{
		{
			Value:      r.v,
			Time:       r.t,
			Aggregated: r.count,
		},
	}
}
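
// All of the gota-backed reducers in this file share the same shape: each
// aggregate call feeds the indicator's Add method and records the latest
// time, while Emit withholds output until count exceeds holdPeriod. Passing
// holdPeriod == -1 to a constructor defers to the indicator's own
// WarmCount(), the number of samples it needs before its output is
// meaningful. A hypothetical caller (warmup being whatever gota.WarmupType
// the engine selected):
//
//	r := NewExponentialMovingAverageReducer(9, -1, warmup)
//	// r.Emit() returns nil until more than WarmCount() points are aggregated.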

type RelativeStrengthIndexReducer struct {
	rsi        gota.RSI
	holdPeriod uint32
	count      uint32
	v          float64
	t          int64
}

func NewRelativeStrengthIndexReducer(period int, holdPeriod int, warmupType gota.WarmupType) *RelativeStrengthIndexReducer {
	rsi := gota.NewRSI(period, warmupType)
	if holdPeriod == -1 {
		holdPeriod = rsi.WarmCount()
	}
	return &RelativeStrengthIndexReducer{
		rsi:        *rsi,
		holdPeriod: uint32(holdPeriod),
	}
}

func (r *RelativeStrengthIndexReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Value, p.Time)
}

func (r *RelativeStrengthIndexReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *RelativeStrengthIndexReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *RelativeStrengthIndexReducer) aggregate(v float64, t int64) {
	r.v = r.rsi.Add(v)
	r.t = t
	r.count++
}

func (r *RelativeStrengthIndexReducer) Emit() []FloatPoint {
	if r.count <= r.holdPeriod {
		return nil
	}

	return []FloatPoint{
		{
			Value:      r.v,
			Time:       r.t,
			Aggregated: r.count,
		},
	}
}

type TripleExponentialDerivativeReducer struct {
	trix       gota.TRIX
	holdPeriod uint32
	count      uint32
	v          float64
	t          int64
}

func NewTripleExponentialDerivativeReducer(period int, holdPeriod int, warmupType gota.WarmupType) *TripleExponentialDerivativeReducer {
	trix := gota.NewTRIX(period, warmupType)
	if holdPeriod == -1 {
		holdPeriod = trix.WarmCount()
	}
	return &TripleExponentialDerivativeReducer{
		trix:       *trix,
		holdPeriod: uint32(holdPeriod),
	}
}

func (r *TripleExponentialDerivativeReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Value, p.Time)
}

func (r *TripleExponentialDerivativeReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *TripleExponentialDerivativeReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *TripleExponentialDerivativeReducer) aggregate(v float64, t int64) {
	r.v = r.trix.Add(v)
	r.t = t
	r.count++
}

func (r *TripleExponentialDerivativeReducer) Emit() []FloatPoint {
	if r.count <= r.holdPeriod {
		return nil
	}
	if math.IsInf(r.v, 0) {
		return nil
	}

	return []FloatPoint{
		{
			Value:      r.v,
			Time:       r.t,
			Aggregated: r.count,
		},
	}
}

type KaufmansEfficiencyRatioReducer struct {
	ker        gota.KER
	holdPeriod uint32
	count      uint32
	v          float64
	t          int64
}

func NewKaufmansEfficiencyRatioReducer(period int, holdPeriod int) *KaufmansEfficiencyRatioReducer {
	ker := gota.NewKER(period)
	if holdPeriod == -1 {
		holdPeriod = ker.WarmCount()
	}
	return &KaufmansEfficiencyRatioReducer{
		ker:        *ker,
		holdPeriod: uint32(holdPeriod),
	}
}

func (r *KaufmansEfficiencyRatioReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Value, p.Time)
}

func (r *KaufmansEfficiencyRatioReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *KaufmansEfficiencyRatioReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *KaufmansEfficiencyRatioReducer) aggregate(v float64, t int64) {
	r.v = r.ker.Add(v)
	r.t = t
	r.count++
}

func (r *KaufmansEfficiencyRatioReducer) Emit() []FloatPoint {
	if r.count <= r.holdPeriod {
		return nil
	}
	if math.IsInf(r.v, 0) {
		return nil
	}

	return []FloatPoint{
		{
			Value:      r.v,
			Time:       r.t,
			Aggregated: r.count,
		},
	}
}

type KaufmansAdaptiveMovingAverageReducer struct {
	kama       gota.KAMA
	holdPeriod uint32
	count      uint32
	v          float64
	t          int64
}

func NewKaufmansAdaptiveMovingAverageReducer(period int, holdPeriod int) *KaufmansAdaptiveMovingAverageReducer {
	kama := gota.NewKAMA(period)
	if holdPeriod == -1 {
		holdPeriod = kama.WarmCount()
	}
	return &KaufmansAdaptiveMovingAverageReducer{
		kama:       *kama,
		holdPeriod: uint32(holdPeriod),
	}
}

func (r *KaufmansAdaptiveMovingAverageReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Value, p.Time)
}

func (r *KaufmansAdaptiveMovingAverageReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *KaufmansAdaptiveMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *KaufmansAdaptiveMovingAverageReducer) aggregate(v float64, t int64) {
	r.v = r.kama.Add(v)
	r.t = t
	r.count++
}

func (r *KaufmansAdaptiveMovingAverageReducer) Emit() []FloatPoint {
	if r.count <= r.holdPeriod {
		return nil
	}
	if math.IsInf(r.v, 0) {
		return nil
	}

	return []FloatPoint{
		{
			Value:      r.v,
			Time:       r.t,
			Aggregated: r.count,
		},
	}
}

type ChandeMomentumOscillatorReducer struct {
	cmo        gota.AlgSimple
	holdPeriod uint32
	count      uint32
	v          float64
	t          int64
}

func NewChandeMomentumOscillatorReducer(period int, holdPeriod int, warmupType gota.WarmupType) *ChandeMomentumOscillatorReducer {
	var cmo gota.AlgSimple
	if warmupType == gota.WarmupType(-1) {
		cmo = gota.NewCMO(period)
	} else {
		cmo = gota.NewCMOS(period, warmupType)
	}

	if holdPeriod == -1 {
		holdPeriod = cmo.WarmCount()
	}
	return &ChandeMomentumOscillatorReducer{
		cmo:        cmo,
		holdPeriod: uint32(holdPeriod),
	}
}

func (r *ChandeMomentumOscillatorReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Value, p.Time)
}

func (r *ChandeMomentumOscillatorReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *ChandeMomentumOscillatorReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.aggregate(float64(p.Value), p.Time)
}

func (r *ChandeMomentumOscillatorReducer) aggregate(v float64, t int64) {
	r.v = r.cmo.Add(v)
	r.t = t
	r.count++
}

func (r *ChandeMomentumOscillatorReducer) Emit() []FloatPoint {
	if r.count <= r.holdPeriod {
		return nil
	}

	return []FloatPoint{
		{
			Value:      r.v,
			Time:       r.t,
			Aggregated: r.count,
		},
	}
}

// FloatCumulativeSumReducer accumulates a running sum of the values from each point.
type FloatCumulativeSumReducer struct {
	curr FloatPoint
}

// NewFloatCumulativeSumReducer creates a new FloatCumulativeSumReducer.
func NewFloatCumulativeSumReducer() *FloatCumulativeSumReducer {
	return &FloatCumulativeSumReducer{
		curr: FloatPoint{Nil: true},
	}
}

func (r *FloatCumulativeSumReducer) AggregateFloat(p *FloatPoint) {
	r.curr.Value += p.Value
	r.curr.Time = p.Time
	r.curr.Nil = false
}

func (r *FloatCumulativeSumReducer) Emit() []FloatPoint {
	var pts []FloatPoint
	if !r.curr.Nil {
		pts = []FloatPoint{r.curr}
	}
	return pts
}

// IntegerCumulativeSumReducer accumulates a running sum of the values from each point.
type IntegerCumulativeSumReducer struct {
	curr IntegerPoint
}

// NewIntegerCumulativeSumReducer creates a new IntegerCumulativeSumReducer.
func NewIntegerCumulativeSumReducer() *IntegerCumulativeSumReducer {
	return &IntegerCumulativeSumReducer{
		curr: IntegerPoint{Nil: true},
	}
}

func (r *IntegerCumulativeSumReducer) AggregateInteger(p *IntegerPoint) {
	r.curr.Value += p.Value
	r.curr.Time = p.Time
	r.curr.Nil = false
}

func (r *IntegerCumulativeSumReducer) Emit() []IntegerPoint {
	var pts []IntegerPoint
	if !r.curr.Nil {
		pts = []IntegerPoint{r.curr}
	}
	return pts
}

// UnsignedCumulativeSumReducer accumulates a running sum of the values from each point.
type UnsignedCumulativeSumReducer struct {
	curr UnsignedPoint
}

// NewUnsignedCumulativeSumReducer creates a new UnsignedCumulativeSumReducer.
func NewUnsignedCumulativeSumReducer() *UnsignedCumulativeSumReducer {
	return &UnsignedCumulativeSumReducer{
		curr: UnsignedPoint{Nil: true},
	}
}

func (r *UnsignedCumulativeSumReducer) AggregateUnsigned(p *UnsignedPoint) {
	r.curr.Value += p.Value
	r.curr.Time = p.Time
	r.curr.Nil = false
}

func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint {
	var pts []UnsignedPoint
	if !r.curr.Nil {
		pts = []UnsignedPoint{r.curr}
	}
	return pts
}
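
// Usage sketch (values invented): unlike the windowed reducers, Emit reports
// the running total as of the latest aggregated point, so alternating
// Aggregate and Emit calls yields the cumulative series.
//
//	r := NewFloatCumulativeSumReducer()
//	r.AggregateFloat(&FloatPoint{Time: 1, Value: 2})
//	r.Emit() // Value: 2
//	r.AggregateFloat(&FloatPoint{Time: 2, Value: 3})
//	r.Emit() // Value: 5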

// FloatHoltWintersReducer forecasts a series into the future.
// This is done using the Holt-Winters damped method.
//  1. The initial values are estimated from the series by minimizing the
//     sum of squared errors (SSE).
//  2. The series is forecasted into the future using the iterative relations.
type FloatHoltWintersReducer struct {
	// Season period
	m        int
	seasonal bool

	// Horizon
	h int

	// Interval between points
	interval int64
	// interval / 2 -- used to perform rounding
	halfInterval int64

	// Whether to include all data or only future values
	includeFitData bool

	// NelderMead optimizer
	optim *neldermead.Optimizer
	// Small difference bound for the optimizer
	epsilon float64

	y      []float64
	points []FloatPoint
}

const (
	// Arbitrary weight for initializing some initial guesses.
	// This should be in the range [0,1].
	hwWeight = 0.5
	// Epsilon value for the minimization process.
	hwDefaultEpsilon = 1.0e-4
	// Define a grid of initial guesses for the parameters: alpha, beta,
	// gamma, and phi. Keep in mind that this grid is N^4 so we should
	// keep N small.
	// The starting lower guess
	hwGuessLower = 0.3
	// The upper bound on the grid
	hwGuessUpper = 1.0
	// The step between guesses
	hwGuessStep = 0.4
)

// NewFloatHoltWintersReducer creates a new FloatHoltWintersReducer.
func NewFloatHoltWintersReducer(h, m int, includeFitData bool, interval time.Duration) *FloatHoltWintersReducer {
	seasonal := true
	if m < 2 {
		seasonal = false
	}
	return &FloatHoltWintersReducer{
		h:              h,
		m:              m,
		seasonal:       seasonal,
		includeFitData: includeFitData,
		interval:       int64(interval),
		halfInterval:   int64(interval) / 2,
		optim:          neldermead.New(),
		epsilon:        hwDefaultEpsilon,
	}
}

func (r *FloatHoltWintersReducer) aggregate(time int64, value float64) {
	r.points = append(r.points, FloatPoint{
		Time:  time,
		Value: value,
	})
}

// AggregateFloat aggregates a point into the reducer and updates the current window.
func (r *FloatHoltWintersReducer) AggregateFloat(p *FloatPoint) {
	r.aggregate(p.Time, p.Value)
}

// AggregateInteger aggregates a point into the reducer and updates the current window.
func (r *FloatHoltWintersReducer) AggregateInteger(p *IntegerPoint) {
	r.aggregate(p.Time, float64(p.Value))
}

// roundTime rounds t to the nearest multiple of the interval.
func (r *FloatHoltWintersReducer) roundTime(t int64) int64 {
	// Overflow safe round function
	remainder := t % r.interval
	if remainder > r.halfInterval {
		// Round up
		return (t/r.interval + 1) * r.interval
	}
	// Round down
	return (t / r.interval) * r.interval
}
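
// Worked example for roundTime (values invented): rounding happens without
// first adding halfInterval to t, which keeps the arithmetic overflow-safe
// near the int64 extremes. With interval = 10:
//
//	roundTime(14) // remainder 4 <= 5: round down to 10
//	roundTime(16) // remainder 6 >  5: round up to 20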

// Emit returns the points generated by the Holt-Winters algorithm.
func (r *FloatHoltWintersReducer) Emit() []FloatPoint {
	if l := len(r.points); l < 2 || r.seasonal && l < r.m || r.h <= 0 {
		return nil
	}
	// First fill in r.y with values and NaNs for missing values
	start, stop := r.roundTime(r.points[0].Time), r.roundTime(r.points[len(r.points)-1].Time)
	count := (stop - start) / r.interval
	if count <= 0 {
		return nil
	}
	r.y = make([]float64, 1, count)
	r.y[0] = r.points[0].Value
	t := r.roundTime(r.points[0].Time)
	for _, p := range r.points[1:] {
		rounded := r.roundTime(p.Time)
		if rounded <= t {
			// Drop values that occur for the same time bucket
			continue
		}
		t += r.interval
		// Add any missing values before the next point
		for rounded != t {
			// Add in a NaN so we can skip it later.
			r.y = append(r.y, math.NaN())
			t += r.interval
		}
		r.y = append(r.y, p.Value)
	}

	// Seasonality
	m := r.m

	// Starting guesses
	// NOTE: Since these values are only initial guesses, we simply skip
	// any missing data and call the result good enough.

	l0 := 0.0
	if r.seasonal {
		for i := 0; i < m; i++ {
			if !math.IsNaN(r.y[i]) {
				l0 += (1 / float64(m)) * r.y[i]
			}
		}
	} else {
		l0 += hwWeight * r.y[0]
	}

	b0 := 0.0
	if r.seasonal {
		for i := 0; i < m && m+i < len(r.y); i++ {
			if !math.IsNaN(r.y[i]) && !math.IsNaN(r.y[m+i]) {
				b0 += 1 / float64(m*m) * (r.y[m+i] - r.y[i])
			}
		}
	} else {
		if !math.IsNaN(r.y[1]) {
			b0 = hwWeight * (r.y[1] - r.y[0])
		}
	}

	var s []float64
	if r.seasonal {
		s = make([]float64, m)
		for i := 0; i < m; i++ {
			if !math.IsNaN(r.y[i]) {
				s[i] = r.y[i] / l0
			} else {
				s[i] = 0
			}
		}
	}

	parameters := make([]float64, 6+len(s))
	parameters[4] = l0
	parameters[5] = b0
	o := len(parameters) - len(s)
	for i := range s {
		parameters[i+o] = s[i]
	}

	// Determine best fit for the various parameters
	minSSE := math.Inf(1)
	var bestParams []float64
	for alpha := hwGuessLower; alpha < hwGuessUpper; alpha += hwGuessStep {
		for beta := hwGuessLower; beta < hwGuessUpper; beta += hwGuessStep {
			for gamma := hwGuessLower; gamma < hwGuessUpper; gamma += hwGuessStep {
				for phi := hwGuessLower; phi < hwGuessUpper; phi += hwGuessStep {
					parameters[0] = alpha
					parameters[1] = beta
					parameters[2] = gamma
					parameters[3] = phi
					sse, params := r.optim.Optimize(r.sse, parameters, r.epsilon, 1)
					if sse < minSSE || bestParams == nil {
						minSSE = sse
						bestParams = params
					}
				}
			}
		}
	}

	// Forecast
	forecasted := r.forecast(r.h, bestParams)
	var points []FloatPoint
	if r.includeFitData {
		start := r.points[0].Time
		points = make([]FloatPoint, 0, len(forecasted))
		for i, v := range forecasted {
			if !math.IsNaN(v) {
				t := start + r.interval*(int64(i))
				points = append(points, FloatPoint{
					Value: v,
					Time:  t,
				})
			}
		}
	} else {
		stop := r.points[len(r.points)-1].Time
		points = make([]FloatPoint, 0, r.h)
		for i, v := range forecasted[len(r.y):] {
			if !math.IsNaN(v) {
				t := stop + r.interval*(int64(i)+1)
				points = append(points, FloatPoint{
					Value: v,
					Time:  t,
				})
			}
		}
	}
	// Clear data set
	r.y = r.y[0:0]
	return points
}

// next computes the next values using the recursive relations.
func (r *FloatHoltWintersReducer) next(alpha, beta, gamma, phi, phiH, yT, lTp, bTp, sTm, sTmh float64) (yTh, lT, bT, sT float64) {
	lT = alpha*(yT/sTm) + (1-alpha)*(lTp+phi*bTp)
	bT = beta*(lT-lTp) + (1-beta)*phi*bTp
	sT = gamma*(yT/(lTp+phi*bTp)) + (1-gamma)*sTm
	yTh = (lT + phiH*bT) * sTmh
	return
}
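
// In conventional notation, next implements the damped multiplicative
// Holt-Winters recurrences:
//
//	l_t     = alpha*(y_t/s_{t-m}) + (1-alpha)*(l_{t-1} + phi*b_{t-1})
//	b_t     = beta*(l_t - l_{t-1}) + (1-beta)*phi*b_{t-1}
//	s_t     = gamma*(y_t/(l_{t-1} + phi*b_{t-1})) + (1-gamma)*s_{t-m}
//	y_{t+h} = (l_t + (phi + phi^2 + ... + phi^h)*b_t) * s_{t-m+h}
//
// where the damped trend sum phi + phi^2 + ... is passed in as phiH and
// accumulated by the caller (see forecast below).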

// Forecast the data h points into the future.
func (r *FloatHoltWintersReducer) forecast(h int, params []float64) []float64 {
	// Constrain parameters
	r.constrain(params)

	yT := r.y[0]

	phi := params[3]
	phiH := phi

	lT := params[4]
	bT := params[5]

	// seasonals is a ring buffer of past sT values
	var seasonals []float64
	var m, so int
	if r.seasonal {
		seasonals = params[6:]
		m = len(params[6:])
		if m == 1 {
			seasonals[0] = 1
		}
		// Season index offset
		so = m - 1
	}

	forecasted := make([]float64, len(r.y)+h)
	forecasted[0] = yT
	l := len(r.y)
	var hm int
	stm, stmh := 1.0, 1.0
	for t := 1; t < l+h; t++ {
		if r.seasonal {
			hm = t % m
			stm = seasonals[(t-m+so)%m]
			stmh = seasonals[(t-m+hm+so)%m]
		}
		var sT float64
		yT, lT, bT, sT = r.next(
			params[0], // alpha
			params[1], // beta
			params[2], // gamma
			phi,
			phiH,
			yT,
			lT,
			bT,
			stm,
			stmh,
		)
		phiH += math.Pow(phi, float64(t))

		if r.seasonal {
			seasonals[(t+so)%m] = sT
			so++
		}

		forecasted[t] = yT
	}
	return forecasted
}

// Compute sum squared error for the given parameters.
func (r *FloatHoltWintersReducer) sse(params []float64) float64 {
	sse := 0.0
	forecasted := r.forecast(0, params)
	for i := range forecasted {
		// Skip missing values since we cannot use them to compute an error.
		if !math.IsNaN(r.y[i]) {
			// Compute error
			if math.IsNaN(forecasted[i]) {
				// Penalize forecasted NaNs
				return math.Inf(1)
			}
			diff := forecasted[i] - r.y[i]
			sse += diff * diff
		}
	}
	return sse
}

// Constrain alpha, beta, gamma, phi in the range [0, 1]
func (r *FloatHoltWintersReducer) constrain(x []float64) {
	// alpha
	if x[0] > 1 {
		x[0] = 1
	}
	if x[0] < 0 {
		x[0] = 0
	}
	// beta
	if x[1] > 1 {
		x[1] = 1
	}
	if x[1] < 0 {
		x[1] = 0
	}
	// gamma
	if x[2] > 1 {
		x[2] = 1
	}
	if x[2] < 0 {
		x[2] = 0
	}
	// phi
	if x[3] > 1 {
		x[3] = 1
	}
	if x[3] < 0 {
		x[3] = 0
	}
}
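
// Usage sketch (hypothetical; series stands in for the caller's time-ordered
// points): forecast 4 points at a one-minute interval with a season length of
// 2, returning only future values.
//
//	r := NewFloatHoltWintersReducer(4, 2, false, time.Minute)
//	for i := range series {
//		r.AggregateFloat(&series[i])
//	}
//	forecast := r.Emit() // up to 4 points after the last input time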

// FloatIntegralReducer calculates the time-integral of the aggregated points.
type FloatIntegralReducer struct {
	interval Interval
	sum      float64
	prev     FloatPoint
	window   struct {
		start int64
		end   int64
	}
	ch  chan FloatPoint
	opt IteratorOptions
}

// NewFloatIntegralReducer creates a new FloatIntegralReducer.
func NewFloatIntegralReducer(interval Interval, opt IteratorOptions) *FloatIntegralReducer {
	return &FloatIntegralReducer{
		interval: interval,
		prev:     FloatPoint{Nil: true},
		ch:       make(chan FloatPoint, 1),
		opt:      opt,
	}
}

// AggregateFloat aggregates a point into the reducer.
func (r *FloatIntegralReducer) AggregateFloat(p *FloatPoint) {
	// If this is the first point, just save it
	if r.prev.Nil {
		r.prev = *p
		if !r.opt.Interval.IsZero() {
			// Record the end of the time interval.
			// We do not care whether the last number is inclusive or exclusive
			// because we treat both the same for the involved math.
			if r.opt.Ascending {
				r.window.start, r.window.end = r.opt.Window(p.Time)
			} else {
				r.window.end, r.window.start = r.opt.Window(p.Time)
			}
		}
		return
	}

	// If this point has the same timestamp as the previous one,
	// skip the point. Points sent into this reducer are expected
	// to be fed in order.
	if r.prev.Time == p.Time {
		r.prev = *p
		return
	} else if !r.opt.Interval.IsZero() && ((r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end)) {
		// If our previous time is not equal to the window, we need to
		// interpolate the area at the end of this interval.
		if r.prev.Time != r.window.end {
			value := linearFloat(r.window.end, r.prev.Time, p.Time, r.prev.Value, p.Value)
			elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration)
			r.sum += 0.5 * (value + r.prev.Value) * elapsed

			r.prev.Value = value
			r.prev.Time = r.window.end
		}

		// Emit the current point through the channel and then clear it.
		r.ch <- FloatPoint{Time: r.window.start, Value: r.sum}
		if r.opt.Ascending {
			r.window.start, r.window.end = r.opt.Window(p.Time)
		} else {
			r.window.end, r.window.start = r.opt.Window(p.Time)
		}
		r.sum = 0.0
	}

	// Normal operation: update the sum using the trapezium rule
	elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration)
	r.sum += 0.5 * (p.Value + r.prev.Value) * elapsed
	r.prev = *p
}

// Emit emits the time-integral of the aggregated points as a single point.
// InfluxQL convention dictates that outside a group-by-time clause we return
// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime
// and a higher level will change it to the start of the time group.
func (r *FloatIntegralReducer) Emit() []FloatPoint {
	select {
	case pt, ok := <-r.ch:
		if !ok {
			return nil
		}
		return []FloatPoint{pt}
	default:
		return nil
	}
}

// Close flushes any in-progress points to ensure any remaining points are
// emitted.
func (r *FloatIntegralReducer) Close() error {
	// If our last point is at the start time, then discard this point since
	// there is no area within this bucket. Otherwise, send off what we
	// currently have as the final point.
	if !r.prev.Nil && r.prev.Time != r.window.start {
		r.ch <- FloatPoint{Time: r.window.start, Value: r.sum}
	}
	close(r.ch)
	return nil
}
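
// Worked example (values invented): with integral(value, 10s), points
// (t=0s, v=2) and (t=20s, v=4) contribute 0.5*(2+4)*(20s/10s) = 6 to the
// running sum: the trapezoid area between the points, with time measured in
// units of the integral interval. Completed windows are handed off through
// the buffered channel, Emit drains it with a non-blocking select, and Close
// flushes the final partial window.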

// IntegerIntegralReducer calculates the time-integral of the aggregated points.
type IntegerIntegralReducer struct {
	interval Interval
	sum      float64
	prev     IntegerPoint
	window   struct {
		start int64
		end   int64
	}
	ch  chan FloatPoint
	opt IteratorOptions
}

// NewIntegerIntegralReducer creates a new IntegerIntegralReducer.
func NewIntegerIntegralReducer(interval Interval, opt IteratorOptions) *IntegerIntegralReducer {
	return &IntegerIntegralReducer{
		interval: interval,
		prev:     IntegerPoint{Nil: true},
		ch:       make(chan FloatPoint, 1),
		opt:      opt,
	}
}

// AggregateInteger aggregates a point into the reducer.
func (r *IntegerIntegralReducer) AggregateInteger(p *IntegerPoint) {
	// If this is the first point, just save it
	if r.prev.Nil {
		r.prev = *p

		// Record the end of the time interval.
		// We do not care whether the last number is inclusive or exclusive
		// because we treat both the same for the involved math.
		if r.opt.Ascending {
			r.window.start, r.window.end = r.opt.Window(p.Time)
		} else {
			r.window.end, r.window.start = r.opt.Window(p.Time)
		}

		// If we see the minimum allowable time, set the time to zero so we don't
		// break the default returned time for aggregate queries without times.
		if r.window.start == influxql.MinTime {
			r.window.start = 0
		}
		return
	}

	// If this point has the same timestamp as the previous one,
	// skip the point. Points sent into this reducer are expected
	// to be fed in order.
	value := float64(p.Value)
	if r.prev.Time == p.Time {
		r.prev = *p
		return
	} else if (r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end) {
		// If our previous time is not equal to the window, we need to
		// interpolate the area at the end of this interval.
		if r.prev.Time != r.window.end {
			value = linearFloat(r.window.end, r.prev.Time, p.Time, float64(r.prev.Value), value)
			elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration)
			r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed

			r.prev.Time = r.window.end
		}

		// Emit the current point through the channel and then clear it.
		r.ch <- FloatPoint{Time: r.window.start, Value: r.sum}
		if r.opt.Ascending {
			r.window.start, r.window.end = r.opt.Window(p.Time)
		} else {
			r.window.end, r.window.start = r.opt.Window(p.Time)
		}
		r.sum = 0.0
	}

	// Normal operation: update the sum using the trapezium rule
	elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration)
	r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed
	r.prev = *p
}

// Emit emits the time-integral of the aggregated points as a single FLOAT point.
// InfluxQL convention dictates that outside a group-by-time clause we return
// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime
// and a higher level will change it to the start of the time group.
func (r *IntegerIntegralReducer) Emit() []FloatPoint {
	select {
	case pt, ok := <-r.ch:
		if !ok {
			return nil
		}
		return []FloatPoint{pt}
	default:
		return nil
	}
}

// Close flushes any in-progress points to ensure any remaining points are
// emitted.
func (r *IntegerIntegralReducer) Close() error {
	// If our last point is at the start time, then discard this point since
	// there is no area within this bucket. Otherwise, send off what we
	// currently have as the final point.
	if !r.prev.Nil && r.prev.Time != r.window.start {
		r.ch <- FloatPoint{Time: r.window.start, Value: r.sum}
	}
	close(r.ch)
	return nil
}

// UnsignedIntegralReducer calculates the time-integral of the aggregated points.
type UnsignedIntegralReducer struct {
	interval Interval
	sum      float64
	prev     UnsignedPoint
	window   struct {
		start int64
		end   int64
	}
	ch  chan FloatPoint
	opt IteratorOptions
}

// NewUnsignedIntegralReducer creates a new UnsignedIntegralReducer.
func NewUnsignedIntegralReducer(interval Interval, opt IteratorOptions) *UnsignedIntegralReducer {
	return &UnsignedIntegralReducer{
		interval: interval,
		prev:     UnsignedPoint{Nil: true},
		ch:       make(chan FloatPoint, 1),
		opt:      opt,
	}
}

// AggregateUnsigned aggregates a point into the reducer.
func (r *UnsignedIntegralReducer) AggregateUnsigned(p *UnsignedPoint) {
	// If this is the first point, just save it
	if r.prev.Nil {
		r.prev = *p

		// Record the end of the time interval.
		// We do not care whether the last number is inclusive or exclusive
		// because we treat both the same for the involved math.
		if r.opt.Ascending {
			r.window.start, r.window.end = r.opt.Window(p.Time)
		} else {
			r.window.end, r.window.start = r.opt.Window(p.Time)
		}

		// If we see the minimum allowable time, set the time to zero so we don't
		// break the default returned time for aggregate queries without times.
		if r.window.start == influxql.MinTime {
			r.window.start = 0
		}
		return
	}

	// If this point has the same timestamp as the previous one,
	// skip the point. Points sent into this reducer are expected
	// to be fed in order.
	value := float64(p.Value)
	if r.prev.Time == p.Time {
		r.prev = *p
		return
	} else if (r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end) {
		// If our previous time is not equal to the window, we need to
		// interpolate the area at the end of this interval.
		if r.prev.Time != r.window.end {
			value = linearFloat(r.window.end, r.prev.Time, p.Time, float64(r.prev.Value), value)
			elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration)
			r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed

			r.prev.Time = r.window.end
		}

		// Emit the current point through the channel and then clear it.
		r.ch <- FloatPoint{Time: r.window.start, Value: r.sum}
		if r.opt.Ascending {
			r.window.start, r.window.end = r.opt.Window(p.Time)
		} else {
			r.window.end, r.window.start = r.opt.Window(p.Time)
		}
		r.sum = 0.0
	}

	// Normal operation: update the sum using the trapezium rule
	elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration)
	r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed
	r.prev = *p
}

// Emit emits the time-integral of the aggregated points as a single FLOAT point.
// InfluxQL convention dictates that outside a group-by-time clause we return
// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime
// and a higher level will change it to the start of the time group.
func (r *UnsignedIntegralReducer) Emit() []FloatPoint {
	select {
	case pt, ok := <-r.ch:
		if !ok {
			return nil
		}
		return []FloatPoint{pt}
	default:
		return nil
	}
}

// Close flushes any in-progress points to ensure any remaining points are
// emitted.
func (r *UnsignedIntegralReducer) Close() error {
	// If our last point is at the start time, then discard this point since
	// there is no area within this bucket. Otherwise, send off what we
	// currently have as the final point.
	if !r.prev.Nil && r.prev.Time != r.window.start {
		r.ch <- FloatPoint{Time: r.window.start, Value: r.sum}
	}
	close(r.ch)
	return nil
}

type FloatTopReducer struct {
	h *floatPointsByFunc
}

func NewFloatTopReducer(n int) *FloatTopReducer {
	return &FloatTopReducer{
		h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool {
			if a.Value != b.Value {
				return a.Value < b.Value
			}
			return a.Time > b.Time
		}),
	}
}

func (r *FloatTopReducer) AggregateFloat(p *FloatPoint) {
	if r.h.Len() == cap(r.h.points) {
		// Compare the minimum point and the aggregated point. If our value is
		// larger, replace the current min value.
		if !r.h.cmp(&r.h.points[0], p) {
			return
		}
		p.CopyTo(&r.h.points[0])
		heap.Fix(r.h, 0)
		return
	}

	var clone FloatPoint
	p.CopyTo(&clone)
	heap.Push(r.h, clone)
}

func (r *FloatTopReducer) Emit() []FloatPoint {
	// Sort the points with the maximum value first. While the heap guarantees
	// that the first point is the minimum value, the rest are not in any
	// particular order, so they must be fully sorted before emitting.
	points := make([]FloatPoint, len(r.h.points))
	for i, p := range r.h.points {
		p.Aggregated = 0
		points[i] = p
	}
	h := floatPointsByFunc{points: points, cmp: r.h.cmp}
	sort.Sort(sort.Reverse(&h))
	return points
}

type IntegerTopReducer struct {
	h *integerPointsByFunc
}

func NewIntegerTopReducer(n int) *IntegerTopReducer {
	return &IntegerTopReducer{
		h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool {
			if a.Value != b.Value {
				return a.Value < b.Value
			}
			return a.Time > b.Time
		}),
	}
}

func (r *IntegerTopReducer) AggregateInteger(p *IntegerPoint) {
	if r.h.Len() == cap(r.h.points) {
		// Compare the minimum point and the aggregated point. If our value is
		// larger, replace the current min value.
		if !r.h.cmp(&r.h.points[0], p) {
			return
		}
		p.CopyTo(&r.h.points[0])
		heap.Fix(r.h, 0)
		return
	}

	var clone IntegerPoint
	p.CopyTo(&clone)
	heap.Push(r.h, clone)
}

func (r *IntegerTopReducer) Emit() []IntegerPoint {
	// Sort the points with the maximum value first. While the heap guarantees
	// that the first point is the minimum value, the rest are not in any
	// particular order, so they must be fully sorted before emitting.
	points := make([]IntegerPoint, len(r.h.points))
	for i, p := range r.h.points {
		p.Aggregated = 0
		points[i] = p
	}
	h := integerPointsByFunc{points: points, cmp: r.h.cmp}
	sort.Sort(sort.Reverse(&h))
	return points
}
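
// Usage sketch (values invented): the top reducers keep the n largest points
// in a min-heap, so each candidate is tested against the smallest retained
// value in O(log n). With n = 2:
//
//	r := NewFloatTopReducer(2)
//	for _, v := range []float64{1, 5, 3, 4} {
//		r.AggregateFloat(&FloatPoint{Value: v})
//	}
//	pts := r.Emit() // values 5, 4: the two largest, largest first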
1975 
1976 type UnsignedTopReducer struct {
1977 	h *unsignedPointsByFunc
1978 }
1979 
1980 func NewUnsignedTopReducer(n int) *UnsignedTopReducer {
1981 	return &UnsignedTopReducer{
1982 		h: unsignedPointsSortBy(make([]UnsignedPoint, 0, n), func(a, b *UnsignedPoint) bool {
1983 			if a.Value != b.Value {
1984 				return a.Value < b.Value
1985 			}
1986 			return a.Time > b.Time
1987 		}),
1988 	}
1989 }
1990 
1991 func (r *UnsignedTopReducer) AggregateUnsigned(p *UnsignedPoint) {
1992 	if r.h.Len() == cap(r.h.points) {
1993 		// Compare the minimum point and the aggregated point. If our value is
1994 		// larger, replace the current minimum value.
1995 		if !r.h.cmp(&r.h.points[0], p) {
1996 			return
1997 		}
1998 		p.CopyTo(&r.h.points[0])
1999 		heap.Fix(r.h, 0)
2000 		return
2001 	}
2002 
2003 	var clone UnsignedPoint
2004 	p.CopyTo(&clone)
2005 	heap.Push(r.h, clone)
2006 }
2007 
2008 func (r *UnsignedTopReducer) Emit() []UnsignedPoint {
2009 	// Sort the points in descending order by value (ties: earlier timestamp
2010 	// first). While the heap keeps the minimum point first, the remaining
2011 	// points are in heap order, not sorted order.
2012 	points := make([]UnsignedPoint, len(r.h.points))
2013 	for i, p := range r.h.points {
2014 		p.Aggregated = 0
2015 		points[i] = p
2016 	}
2017 	h := unsignedPointsByFunc{points: points, cmp: r.h.cmp}
2018 	sort.Sort(sort.Reverse(&h))
2019 	return points
2020 }
2021 
2022 type FloatBottomReducer struct {
2023 	h *floatPointsByFunc
2024 }
2025 
2026 func NewFloatBottomReducer(n int) *FloatBottomReducer {
2027 	return &FloatBottomReducer{
2028 		h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool {
2029 			if a.Value != b.Value {
2030 				return a.Value > b.Value
2031 			}
2032 			return a.Time > b.Time
2033 		}),
2034 	}
2035 }
2036 
2037 func (r *FloatBottomReducer) AggregateFloat(p *FloatPoint) {
2038 	if r.h.Len() == cap(r.h.points) {
2039 		// Compare the maximum point and the aggregated point. If our value is
2040 		// smaller, replace the current maximum value.
2041 		if !r.h.cmp(&r.h.points[0], p) {
2042 			return
2043 		}
2044 		p.CopyTo(&r.h.points[0])
2045 		heap.Fix(r.h, 0)
2046 		return
2047 	}
2048 
2049 	var clone FloatPoint
2050 	p.CopyTo(&clone)
2051 	heap.Push(r.h, clone)
2052 }
2053 
2054 func (r *FloatBottomReducer) Emit() []FloatPoint {
2055 	// Sort the points in ascending order by value (ties: earlier timestamp
2056 	// first). While the heap keeps the maximum point first, the remaining
2057 	// points are in heap order, not sorted order.
2058 	points := make([]FloatPoint, len(r.h.points))
2059 	for i, p := range r.h.points {
2060 		p.Aggregated = 0
2061 		points[i] = p
2062 	}
2063 	h := floatPointsByFunc{points: points, cmp: r.h.cmp}
2064 	sort.Sort(sort.Reverse(&h))
2065 	return points
2066 }
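// --- Editorial sketch, not part of the original file ---
// The Bottom reducers invert the Top comparator, so the heap root holds the
// maximum retained value and a new point evicts it only when smaller. An
// end-to-end illustration (exampleBottom is a hypothetical helper): feeding
// the values 5, 1, 3, 2 into a two-slot bottom reducer retains {1, 2}.

func exampleBottom() []FloatPoint {
	r := NewFloatBottomReducer(2)
	for i, v := range []float64{5, 1, 3, 2} {
		r.AggregateFloat(&FloatPoint{Time: int64(i), Value: v})
	}
	// Once full, each smaller incoming value replaces the heap root (the
	// current maximum); Emit then sorts ascending by value.
	return r.Emit() // values 1, then 2
}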
2067 
2068 type IntegerBottomReducer struct {
2069 	h *integerPointsByFunc
2070 }
2071 
2072 func NewIntegerBottomReducer(n int) *IntegerBottomReducer {
2073 	return &IntegerBottomReducer{
2074 		h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool {
2075 			if a.Value != b.Value {
2076 				return a.Value > b.Value
2077 			}
2078 			return a.Time > b.Time
2079 		}),
2080 	}
2081 }
2082 
2083 func (r *IntegerBottomReducer) AggregateInteger(p *IntegerPoint) {
2084 	if r.h.Len() == cap(r.h.points) {
2085 		// Compare the maximum point and the aggregated point. If our value is
2086 		// smaller, replace the current maximum value.
2087 		if !r.h.cmp(&r.h.points[0], p) {
2088 			return
2089 		}
2090 		p.CopyTo(&r.h.points[0])
2091 		heap.Fix(r.h, 0)
2092 		return
2093 	}
2094 
2095 	var clone IntegerPoint
2096 	p.CopyTo(&clone)
2097 	heap.Push(r.h, clone)
2098 }
2099 
2100 func (r *IntegerBottomReducer) Emit() []IntegerPoint {
2101 	// Sort the points in ascending order by value (ties: earlier timestamp
2102 	// first). While the heap keeps the maximum point first, the remaining
2103 	// points are in heap order, not sorted order.
2104 	points := make([]IntegerPoint, len(r.h.points))
2105 	for i, p := range r.h.points {
2106 		p.Aggregated = 0
2107 		points[i] = p
2108 	}
2109 	h := integerPointsByFunc{points: points, cmp: r.h.cmp}
2110 	sort.Sort(sort.Reverse(&h))
2111 	return points
2112 }
2113 
2114 type UnsignedBottomReducer struct {
2115 	h *unsignedPointsByFunc
2116 }
2117 
2118 func NewUnsignedBottomReducer(n int) *UnsignedBottomReducer {
2119 	return &UnsignedBottomReducer{
2120 		h: unsignedPointsSortBy(make([]UnsignedPoint, 0, n), func(a, b *UnsignedPoint) bool {
2121 			if a.Value != b.Value {
2122 				return a.Value > b.Value
2123 			}
2124 			return a.Time > b.Time
2125 		}),
2126 	}
2127 }
2128 
2129 func (r *UnsignedBottomReducer) AggregateUnsigned(p *UnsignedPoint) {
2130 	if r.h.Len() == cap(r.h.points) {
2131 		// Compare the maximum point and the aggregated point. If our value is
2132 		// smaller, replace the current maximum value.
2133 		if !r.h.cmp(&r.h.points[0], p) {
2134 			return
2135 		}
2136 		p.CopyTo(&r.h.points[0])
2137 		heap.Fix(r.h, 0)
2138 		return
2139 	}
2140 
2141 	var clone UnsignedPoint
2142 	p.CopyTo(&clone)
2143 	heap.Push(r.h, clone)
2144 }
2145 
2146 func (r *UnsignedBottomReducer) Emit() []UnsignedPoint {
2147 	// Sort the points in ascending order by value (ties: earlier timestamp
2148 	// first). While the heap keeps the maximum point first, the remaining
2149 	// points are in heap order, not sorted order.
2150 	points := make([]UnsignedPoint, len(r.h.points))
2151 	for i, p := range r.h.points {
2152 		p.Aggregated = 0
2153 		points[i] = p
2154 	}
2155 	h := unsignedPointsByFunc{points: points, cmp: r.h.cmp}
2156 	sort.Sort(sort.Reverse(&h))
2157 	return points
2158 }
2159 
2160 type StringMergeHllReducer struct {
2161 	plus *hll.Plus
2162 	err  error
2163 }
2164 
2165 func NewStringMergeHllReducer() *StringMergeHllReducer {
2166 	return &StringMergeHllReducer{plus: nil}
2167 }
2168 
2169 func unmarshalPlus(s string) (*hll.Plus, error) {
2170 	if len(s) < len(hllPrefix) || string(hllPrefix) != s[:len(hllPrefix)] {
2171 		if len(s) >= len(hllErrorPrefix) && string(hllErrorPrefix) == s[:len(hllErrorPrefix)] {
2172 			// Parse a marshaled error back out of the string.
2173 			return nil, fmt.Errorf("%v", s[len(hllErrorPrefix):])
2174 		}
2175 		return nil, fmt.Errorf("bad prefix for hll.Plus")
2176 	}
2177 	data := []byte(s[len(hllPrefix):])
2178 	if len(data) == 0 {
2179 		// Explicitly treat an empty sketch as a no-op.
2180 		return nil, nil
2181 	}
2182 	b := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
2183 	n, _ := base64.StdEncoding.Decode(b, data) // DecodedLen is an upper bound; keep only the n decoded bytes
2184 	h := new(hll.Plus)
2185 	if err := h.UnmarshalBinary(b[:n]); err != nil {
2186 		return nil, err
2187 	}
2188 	return h, nil
2189 }
2190 
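// --- Editorial sketch, not part of the original file ---
// The string format handled above is a simple tagged encoding: "HLL_" +
// base64(sketch bytes) for a sketch, a bare "HLL_" for an empty sketch, and
// "HLLERROR <message>" to smuggle an error through the string-typed
// aggregation pipeline. A round trip through both helpers (roundTripPlus is
// a hypothetical name; marshalPlus is defined just below):

func roundTripPlus(p *hll.Plus) (*hll.Plus, error) {
	pt := marshalPlus(p, nil)      // encode the sketch into a StringPoint
	return unmarshalPlus(pt.Value) // decode it back; (nil, nil) if p was nil
}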
2191 func (r *StringMergeHllReducer) AggregateString(p *StringPoint) {
2192 	// We cannot return an error here because doing so slows all aggregation
2193 	// functions by ~1%, so we work around it by marshaling the error as a string.
2194 	if r.err != nil {
2195 		return
2196 	}
2197 	h, err := unmarshalPlus(p.Value)
2198 	if err != nil {
2199 		r.err = err
2200 		return
2201 	}
2202 	if r.plus == nil {
2203 		r.plus = h
2204 		return
2205 	} else if h == nil {
2206 		return // an empty sketch decodes to nil; there is nothing to merge
2207 	}
2208 	if err := r.plus.Merge(h); err != nil {
2209 		r.err = err
2210 	}
2211 }
2212 
2213 func marshalPlus(p *hll.Plus, err error) StringPoint {
2214 	if err != nil {
2215 		return StringPoint{
2216 			Time:  ZeroTime,
2217 			Value: string(hllErrorPrefix) + err.Error(),
2218 		}
2219 	}
2220 	if p == nil {
2221 		return StringPoint{
2222 			Time:  ZeroTime,
2223 			Value: string(hllPrefix),
2224 		}
2225 	}
2226 	b, err := p.MarshalBinary()
2227 	if err != nil {
2228 		return StringPoint{
2229 			Time:  ZeroTime,
2230 			Value: string(hllErrorPrefix) + err.Error(),
2231 		}
2232 	}
2233 	hllValue := make([]byte, len(hllPrefix)+base64.StdEncoding.EncodedLen(len(b)))
2234 	copy(hllValue, hllPrefix)
2235 	base64.StdEncoding.Encode(hllValue[len(hllPrefix):], b)
2236 	return StringPoint{
2237 		Time:  ZeroTime,
2238 		Value: string(hllValue),
2239 	}
2240 }
2241 
2242 func (r *StringMergeHllReducer) Emit() []StringPoint {
2243 	return []StringPoint{
2244 		marshalPlus(r.plus, r.err),
2245 	}
2246 }
2247 
2248 type CountHllReducer struct {
2249 	next UnsignedPoint
2250 }
2251 
2252 func NewCountHllReducer() *CountHllReducer {
2253 	return &CountHllReducer{}
2254 }
2255 
2256 func (r *CountHllReducer) AggregateString(p *StringPoint) {
2257 	r.next.Name = p.Name
2258 	r.next.Time = p.Time
2259 	h, err := unmarshalPlus(p.Value)
2260 	if err != nil || h == nil {
2261 		r.next.Value = 0
2262 		return
2263 	}
2264 	r.next.Value = h.Count()
2265 }
2266 
2267 func (r *CountHllReducer) Emit() []UnsignedPoint {
2268 	return []UnsignedPoint{
2269 		r.next,
2270 	}
2271 }
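// --- Editorial sketch, not part of the original file ---
// CountHllReducer is the final stage of a distinct-count pipeline: earlier
// stages fold values into "HLL_"-prefixed strings (see StringMergeHllReducer
// above), and this stage decodes the merged sketch and reads off its
// cardinality estimate. Minimal usage, with estimateCardinality as a
// hypothetical name:

func estimateCardinality(encoded string) uint64 {
	r := NewCountHllReducer()
	r.AggregateString(&StringPoint{Time: ZeroTime, Value: encoded})
	// Emit returns a single UnsignedPoint whose Value is the estimate,
	// or 0 if the string did not decode to a sketch.
	return r.Emit()[0].Value
}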