github.com/influxdata/influxdb/v2@v2.7.6/influxql/query/proxy_executor.go (about)

     1  package query
     2  
     3  import (
     4  	"context"
     5  	"io"
     6  	"strings"
     7  	"time"
     8  
     9  	iql "github.com/influxdata/influxdb/v2/influxql"
    10  	"github.com/influxdata/influxdb/v2/kit/check"
    11  	"github.com/influxdata/influxdb/v2/kit/platform/errors"
    12  	"github.com/influxdata/influxdb/v2/kit/tracing"
    13  	influxlogger "github.com/influxdata/influxdb/v2/logger"
    14  	"github.com/influxdata/influxql"
    15  	"github.com/opentracing/opentracing-go/log"
    16  	"go.uber.org/zap"
    17  )
    18  
// ProxyExecutor parses and executes InfluxQL queries on behalf of the
// proxy query service, encoding results directly to a caller-supplied
// io.Writer.
type ProxyExecutor struct {
	log      *zap.Logger // base logger; per-query trace fields are attached in Query
	executor *Executor   // underlying executor that runs parsed InfluxQL statements
}
    23  
    24  func NewProxyExecutor(log *zap.Logger, executor *Executor) *ProxyExecutor {
    25  	return &ProxyExecutor{log: log, executor: executor}
    26  }
    27  
    28  func (s *ProxyExecutor) Check(ctx context.Context) check.Response {
    29  	return check.Response{Name: "Query Service", Status: check.StatusPass}
    30  }
    31  
    32  func (s *ProxyExecutor) Query(ctx context.Context, w io.Writer, req *iql.QueryRequest) (iql.Statistics, error) {
    33  	span, ctx := tracing.StartSpanFromContext(ctx)
    34  	defer span.Finish()
    35  
    36  	logger := s.log.With(influxlogger.TraceFields(ctx)...)
    37  	logger.Info("executing new query", zap.String("query", req.Query))
    38  
    39  	p := influxql.NewParser(strings.NewReader(req.Query))
    40  	p.SetParams(req.Params)
    41  	q, err := p.ParseQuery()
    42  	if err != nil {
    43  		return iql.Statistics{}, &errors.Error{
    44  			Code: errors.EInvalid,
    45  			Msg:  "failed to parse query",
    46  			Err:  err,
    47  		}
    48  	}
    49  
    50  	span.LogFields(log.String("query", q.String()))
    51  
    52  	opts := ExecutionOptions{
    53  		OrgID:           req.OrganizationID,
    54  		Database:        req.DB,
    55  		RetentionPolicy: req.RP,
    56  		ChunkSize:       req.ChunkSize,
    57  		ReadOnly:        true,
    58  		Authorizer:      OpenAuthorizer,
    59  	}
    60  
    61  	epoch := req.Epoch
    62  	rw := NewResponseWriter(req.EncodingFormat)
    63  
    64  	results, stats := s.executor.ExecuteQuery(ctx, q, opts)
    65  	if req.Chunked {
    66  		for r := range results {
    67  			// Ignore nil results.
    68  			if r == nil {
    69  				continue
    70  			}
    71  
    72  			// if requested, convert result timestamps to epoch
    73  			if epoch != "" {
    74  				convertToEpoch(r, epoch)
    75  			}
    76  
    77  			err = rw.WriteResponse(ctx, w, Response{Results: []*Result{r}})
    78  			if err != nil {
    79  				break
    80  			}
    81  		}
    82  	} else {
    83  		resp := Response{Results: GatherResults(results, epoch)}
    84  		err = rw.WriteResponse(ctx, w, resp)
    85  	}
    86  
    87  	return *stats, err
    88  }
    89  
// GatherResults consumes the results from the given channel and organizes them correctly.
// Results for various statements need to be combined together.
//
// If epoch is non-empty, each result's timestamps are converted to that
// epoch unit before it is merged. The returned slice contains one entry
// per statement ID, with partial results for the same statement folded
// into a single Result.
func GatherResults(ch <-chan *Result, epoch string) []*Result {
	var results []*Result
	for r := range ch {
		// Ignore nil results.
		if r == nil {
			continue
		}

		// if requested, convert result timestamps to epoch
		if epoch != "" {
			convertToEpoch(r, epoch)
		}

		// It's not chunked so buffer results in memory.
		// Results for statements need to be combined together.
		// We need to check if this new result is for the same statement as
		// the last result, or for the next statement.
		if l := len(results); l > 0 && results[l-1].StatementID == r.StatementID {
			// An error result replaces anything previously accumulated
			// for this statement.
			if r.Err != nil {
				results[l-1] = r
				continue
			}

			cr := results[l-1]
			rowsMerged := 0
			if len(cr.Series) > 0 {
				lastSeries := cr.Series[len(cr.Series)-1]

				// Fold the leading rows of the new result into the last
				// series of the combined result while they belong to the
				// same series; stop at the first row of a different series.
				for _, row := range r.Series {
					if !lastSeries.SameSeries(row) {
						// Next row is for a different series than last.
						break
					}
					// Values are for the same series, so append them.
					lastSeries.Values = append(lastSeries.Values, row.Values...)
					lastSeries.Partial = row.Partial
					rowsMerged++
				}
			}

			// Append remaining rows as new rows.
			r.Series = r.Series[rowsMerged:]
			cr.Series = append(cr.Series, r.Series...)
			cr.Messages = append(cr.Messages, r.Messages...)
			cr.Partial = r.Partial
		} else {
			// First result seen for this statement ID; start a new entry.
			results = append(results, r)
		}
	}
	return results
}
   143  
   144  // convertToEpoch converts result timestamps from time.Time to the specified epoch.
   145  func convertToEpoch(r *Result, epoch string) {
   146  	divisor := int64(1)
   147  
   148  	switch epoch {
   149  	case "u":
   150  		divisor = int64(time.Microsecond)
   151  	case "ms":
   152  		divisor = int64(time.Millisecond)
   153  	case "s":
   154  		divisor = int64(time.Second)
   155  	case "m":
   156  		divisor = int64(time.Minute)
   157  	case "h":
   158  		divisor = int64(time.Hour)
   159  	}
   160  
   161  	for _, s := range r.Series {
   162  		for _, v := range s.Values {
   163  			if ts, ok := v[0].(time.Time); ok {
   164  				v[0] = ts.UnixNano() / divisor
   165  			}
   166  		}
   167  	}
   168  }