github.com/simpleiot/simpleiot@v0.18.3/data/history.go

package data

import (
	"context"
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/influxdb-client-go/v2/api"
)

// Regular expressions to sanitize Flux queries
var (
	validField = regexp.MustCompile(`^[a-zA-Z\d.]+$`)
	validValue = regexp.MustCompile(`^[^\\"']*$`)
	// validOp    = regexp.MustCompile(`^(>|>=|<|<=|between|=)$`)
)
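
// Illustrative sketch (not part of the original file): the validators above
// accept dotted identifiers for tag keys and reject quotes and backslashes in
// values, which keeps user input from escaping the generated Flux strings.
// The sample inputs below are hypothetical.
func exampleTagSanitizers() {
	fmt.Println(validField.MatchString("node.description")) // true: letters, digits, and dots
	fmt.Println(validField.MatchString(`r["type"]`))        // false: quotes and brackets rejected
	fmt.Println(validValue.MatchString("pump 42"))          // true: no quotes or backslashes
	fmt.Println(validValue.MatchString(`x" or true`))       // false: double quote rejected
}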

// HistoryQuery is a query sent to an InfluxDB client to request historical
// points.
type HistoryQuery struct {
	Start           time.Time      `json:"start"`
	Stop            time.Time      `json:"stop"`
	TagFilters      TagFilters     `json:"tagFilters"`
	AggregateWindow *time.Duration `json:"aggregateWindow"`
}

// Flux generates a Flux query for the HistoryQuery. Returns an error if tag
// filters could not be sanitized.
func (qry HistoryQuery) Flux(bucket, measurement string) (string, error) {
	sb := &strings.Builder{}
	fmt.Fprintf(
		sb,
		`import "math"
		data = from(bucket: "%v")
			|> range(start: %v, stop: %v)
			|> filter(fn: (r) =>
				r._measurement == "%v" and
				r._field == "value"`,
		bucket,
		qry.Start.Format(time.RFC3339),
		qry.Stop.Format(time.RFC3339),
		measurement,
	)
	// Add filters
	err := qry.TagFilters.Flux(sb)
	if err != nil {
		return "", err
	}
	sb.WriteString(")\n")

	// Add aggregation (or not)
	if qry.AggregateWindow == nil {
		sb.WriteString("data")
	} else {
		fmt.Fprintf(
			sb,
			`data
				|> window(every: %vs, createEmpty: false)
				|> reduce(
					identity: {
						min: math.mInf(sign: 1),
						max: math.mInf(sign: -1),
						count: 0,
						sum: 0.0,
					}, fn: (r, accumulator) => ({
						min: if r._value < accumulator.min then r._value else accumulator.min,
						max: if r._value > accumulator.max then r._value else accumulator.max,
						count: accumulator.count + 1,
						sum: accumulator.sum + r._value,
					})
				)
				|> map(fn: (r) => ({r with mean: r.sum / float(v: r.count)}))
				|> duplicate(column: "_stop", as: "_time")
				|> window(every: inf)
			`,
			qry.AggregateWindow.Seconds(),
		)
	}

	return sb.String(), nil
}
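
// Illustrative sketch (not part of the original file): building a Flux query
// for the last hour of data, aggregated into 5-minute windows. The bucket and
// measurement names ("siot", "points") and the "node.id" filter are
// hypothetical placeholders.
func exampleHistoryQueryFlux() {
	window := 5 * time.Minute
	qry := HistoryQuery{
		Start:           time.Now().Add(-time.Hour),
		Stop:            time.Now(),
		TagFilters:      TagFilters{"node.id": "1234"},
		AggregateWindow: &window,
	}
	flux, err := qry.Flux("siot", "points")
	if err != nil {
		fmt.Println("invalid tag filter:", err)
		return
	}
	fmt.Println(flux)
}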

// Execute generates the Flux query and executes it, populating the specified
// HistoryResults
func (qry HistoryQuery) Execute(
	ctx context.Context,
	api api.QueryAPI,
	bucket, measurement string,
	results *HistoryResults,
) {
	query, err := qry.Flux(bucket, measurement)
	if err != nil {
		results.ErrorMessage = "generating query: " + err.Error()
		return
	}
	rawResults, err := api.Query(ctx, query)
	if err != nil {
		results.ErrorMessage = "executing query: " + err.Error()
		return
	}

	// Populate results
	for rawResults.Next() {
		var (
			ts                            time.Time
			typeTag, keyTag, textField    string
			valueField                    float64
			meanField, minField, maxField float64
			countField                    int64
		)
		nodeTags := make(map[string]string)
		for key, val := range rawResults.Record().Values() {
			var ok bool
			switch key {
			case "_time":
				ts, ok = val.(time.Time)
				if !ok {
					results.ErrorMessage = "error decoding field: time"
					return
				}
			case "type":
				typeTag, ok = val.(string)
				if !ok {
					results.ErrorMessage = "error decoding tag: type"
					return
				}
			case "key":
				keyTag, ok = val.(string)
				if !ok {
					results.ErrorMessage = "error decoding tag: key"
					return
				}
			case "text":
				textField, ok = val.(string)
				if !ok {
					results.ErrorMessage = "error decoding field: text"
					return
				}
			case "value":
				valueField, ok = val.(float64)
				if !ok {
					results.ErrorMessage = "error decoding field: value"
					return
				}
			case "mean":
				meanField, ok = val.(float64)
				if !ok {
					results.ErrorMessage = "error decoding field: mean"
					return
				}
			case "min":
				minField, ok = val.(float64)
				if !ok {
					results.ErrorMessage = "error decoding field: min"
					return
				}
			case "max":
				maxField, ok = val.(float64)
				if !ok {
					results.ErrorMessage = "error decoding field: max"
					return
				}
			case "count":
				countField, ok = val.(int64)
				if !ok {
					results.ErrorMessage = "error decoding field: count"
					return
				}
			default:
				if strings.HasPrefix(key, "node.") {
					tag, ok := val.(string)
					if !ok {
						results.ErrorMessage = "error decoding tag: " + key
						return
					}
					nodeTags[key] = tag
				}
			}
		}

		if qry.AggregateWindow == nil {
			hp := HistoryPoint{
				Time:     ts,
				NodeTags: nodeTags,
				Type:     typeTag,
				Key:      keyTag,
				Value:    valueField,
				Text:     textField,
			}
			results.Points = append(results.Points, hp)
		} else {
			hap := HistoryAggregatedPoint{
				Time:     ts,
				NodeTags: nodeTags,
				Type:     typeTag,
				Key:      keyTag,
				Mean:     meanField,
				Min:      minField,
				Max:      maxField,
				Count:    countField,
			}
			results.AggregatedPoints = append(results.AggregatedPoints, hap)
		}
	}

	// Report any error encountered while iterating over the result stream
	if err := rawResults.Err(); err != nil {
		results.ErrorMessage = "reading results: " + err.Error()
	}
}
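
// Illustrative sketch (not part of the original file): running a history query
// against an existing InfluxDB query handle, e.g. one obtained from
// influxdb2.NewClient(url, token).QueryAPI(org). The bucket and measurement
// names ("siot", "points") and the "node.id" filter are hypothetical.
func exampleExecuteHistoryQuery(ctx context.Context, queryAPI api.QueryAPI) HistoryResults {
	qry := HistoryQuery{
		Start:      time.Now().Add(-24 * time.Hour),
		Stop:       time.Now(),
		TagFilters: TagFilters{"node.id": "1234"},
	}
	var results HistoryResults
	qry.Execute(ctx, queryAPI, "siot", "points", &results)
	return results
}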

// TagFilters further restricts Influx query results by tag.
// Map values may be strings or slices of strings.
type TagFilters map[string]any

// Flux writes a clause for a Flux query (to be appended to the filter function
// body) to the specified strings.Builder. Returns an error if a tag filter
// could not be sanitized.
func (t TagFilters) Flux(sb *strings.Builder) error {
	for k, v := range t {
		// Sanitize input
		if !validField.MatchString(k) {
			return errors.New("invalid tag filter '" + k + "'")
		}
		switch typedV := v.(type) {
		case string:
			if !validValue.MatchString(typedV) {
				return errors.New("invalid tag filter value for '" + k + "'")
			}
			if typedV == "" {
				fmt.Fprintf(sb, ` and not exists r["%s"]`, k)
			} else {
				fmt.Fprintf(sb, ` and r["%s"] == "%s"`, k, typedV)
			}
		case []any:
			if len(typedV) == 0 {
				continue // no values specified, so skip this filter
			}
			for i, elemV := range typedV {
				strV, ok := elemV.(string)
				if !ok || !validValue.MatchString(strV) {
					return errors.New(
						"invalid tag filter value for " + k + "[" + strconv.Itoa(i) + "]",
					)
				}
				if i == 0 {
					if strV == "" {
						fmt.Fprintf(sb, ` and (not exists r["%s"]`, k)
					} else {
						fmt.Fprintf(sb, ` and (r["%s"] == "%s"`, k, strV)
					}
				} else {
					if strV == "" {
						fmt.Fprintf(sb, ` or not exists r["%s"]`, k)
					} else {
						fmt.Fprintf(sb, ` or r["%s"] == "%s"`, k, strV)
					}
				}
			}
			fmt.Fprintf(sb, `)`)
		case []string:
			if len(typedV) == 0 {
				continue // no values specified, so skip this filter
			}
			for i, strV := range typedV {
				if !validValue.MatchString(strV) {
					return errors.New(
						"invalid tag filter value for " + k + "[" + strconv.Itoa(i) + "]",
					)
				}
				if i == 0 {
					if strV == "" {
						fmt.Fprintf(sb, ` and (not exists r["%s"]`, k)
					} else {
						fmt.Fprintf(sb, ` and (r["%s"] == "%s"`, k, strV)
					}
				} else {
					if strV == "" {
						fmt.Fprintf(sb, ` or not exists r["%s"]`, k)
					} else {
						fmt.Fprintf(sb, ` or r["%s"] == "%s"`, k, strV)
					}
				}
			}
			fmt.Fprintf(sb, `)`)
		default:
			return errors.New("invalid tag filter value for '" + k + "': invalid type")
		}
	}
	return nil
}
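
// Illustrative sketch (not part of the original file) of the clause TagFilters
// produces: a string value becomes a single equality test, a slice becomes a
// parenthesized "or" group, and an empty string matches records where the tag
// does not exist. The filter keys and values below are hypothetical, and map
// iteration order is not deterministic.
func exampleTagFiltersFlux() {
	sb := &strings.Builder{}
	filters := TagFilters{
		"node.type": "device",
		"type":      []string{"voltage", "current", ""},
	}
	if err := filters.Flux(sb); err != nil {
		fmt.Println("invalid filter:", err)
		return
	}
	// Produces clauses such as:
	//  and r["node.type"] == "device"
	//  and (r["type"] == "voltage" or r["type"] == "current" or not exists r["type"])
	fmt.Println(sb.String())
}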

// HistoryResults is the result of a history query. The result includes an
// optional error string along with a slice of either points or aggregated
// points.
type HistoryResults struct {
	ErrorMessage     string                   `json:"error,omitempty"`
	Points           []HistoryPoint           `json:"points,omitempty"`
	AggregatedPoints []HistoryAggregatedPoint `json:"aggregatedPoints,omitempty"`
}
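
// Illustrative sketch (not part of the original file): because Execute reports
// failures through ErrorMessage rather than a returned error, callers are
// expected to check it before using the point slices.
func exampleCheckHistoryResults(results HistoryResults) {
	if results.ErrorMessage != "" {
		fmt.Println("history query failed:", results.ErrorMessage)
		return
	}
	fmt.Printf("got %d raw and %d aggregated points\n",
		len(results.Points), len(results.AggregatedPoints))
}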

// HistoryPoint is a point returned by a non-aggregated history query
type HistoryPoint struct {
	Time time.Time `json:"time"`

	// NodeTags (e.g. "id", "description", "type", and others)
	NodeTags map[string]string `json:"nodeTags"`

	// Type of point (voltage, current, key, etc.)
	Type string `json:"type,omitempty"`

	// Key is used to allow a group of points to represent a map or array
	Key string `json:"key,omitempty"`

	// Instantaneous analog or digital value of the point.
	// 0 and 1 are used to represent digital values
	Value float64 `json:"value,omitempty"`

	// Optional text value of the point for data that is best represented
	// as a string rather than a number.
	Text string `json:"text,omitempty"`
}

// HistoryAggregatedPoint summarizes a group of points within one aggregation
// window of a history query
type HistoryAggregatedPoint struct {
	Time time.Time `json:"time"`

	// NodeTags (e.g. "id", "description", "type", and others)
	NodeTags map[string]string `json:"nodeTags"`

	// Type of point (voltage, current, key, etc.)
	Type string `json:"type,omitempty"`

	// Key is used to allow a group of points to represent a map or array
	Key string `json:"key,omitempty"`

	// Arithmetic mean of the point values in the aggregation window
	Mean float64 `json:"mean"`

	// Minimum point value in the aggregation window
	Min float64 `json:"min"`

	// Maximum point value in the aggregation window
	Max float64 `json:"max"`

	// Count is the number of points in the aggregation window
	Count int64 `json:"count"`
}