github.com/siglens/siglens@v0.0.0-20240328180423-f7ce9ae441ed/pkg/ast/pipesearch/searchHandler.go

/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pipesearch

import (
	"bytes"
	"encoding/json"
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"
	"time"

	jsoniter "github.com/json-iterator/go"
	"github.com/siglens/siglens/pkg/alerts/alertutils"
	rutils "github.com/siglens/siglens/pkg/readerUtils"
	"github.com/siglens/siglens/pkg/segment"
	"github.com/siglens/siglens/pkg/segment/query"
	"github.com/siglens/siglens/pkg/segment/query/metadata"
	"github.com/siglens/siglens/pkg/segment/reader/record"
	"github.com/siglens/siglens/pkg/segment/structs"
	sutils "github.com/siglens/siglens/pkg/segment/utils"
	"github.com/siglens/siglens/pkg/utils"
	vtable "github.com/siglens/siglens/pkg/virtualtable"
	log "github.com/sirupsen/logrus"
	"github.com/valyala/fasthttp"
)

const MIN_IN_MS = 60_000
const HOUR_IN_MS = 3600_000
const DAY_IN_MS = 86400_000

/*
ParseSearchBody parses an incoming search request body.

Example incoming body:

	{"searchText":"*","startEpoch":1656716713300,"endEpoch":1656717613300,"indexName":"*", "size": 1000, "from": 0}

It returns searchText, startEpoch, endEpoch, finalSize, indexName, and scrollFrom,
where finalSize = size + from. See the illustrative sketch after this function for an example call.
*/
func ParseSearchBody(jsonSource map[string]interface{}, nowTs uint64) (string, uint64, uint64, uint64, string, int) {
	var searchText, indexName string
	var startEpoch, endEpoch, finalSize uint64
	var scrollFrom int
	sText, ok := jsonSource["searchText"]
	if !ok || sText == "" {
		searchText = "*"
	} else {
		switch val := sText.(type) {
		case string:
			searchText = val
		default:
			log.Errorf("ParseSearchBody: searchText is not a string! Val %+v", val)
		}
	}

	iText, ok := jsonSource["indexName"]
	if !ok || iText == "" {
		indexName = "*"
	} else {
		switch val := iText.(type) {
		case string:
			indexName = val
		case []string:
			indexName = strings.Join(val[:], ",")
		case []interface{}:
			valLen := len(val)
			indexName = ""
			for idx, indVal := range val {
				if idx == valLen-1 {
					indexName += fmt.Sprintf("%v", indVal)
				} else {
					indexName += fmt.Sprintf("%v,", indVal)
				}
			}
		default:
			log.Errorf("ParseSearchBody: indexName is not a supported type! Val %+v, type: %T", val, iText)
		}
	}

	startE, ok := jsonSource["startEpoch"]
	if !ok || startE == nil {
		startEpoch = nowTs - (15 * 60 * 1000)
	} else {
		switch val := startE.(type) {
		case json.Number:
			temp, _ := val.Int64()
			startEpoch = uint64(temp)
		case float64:
			startEpoch = uint64(val)
		case int64:
			startEpoch = uint64(val)
		case uint64:
			startEpoch = val
		case string:
			defValue := nowTs - (15 * 60 * 1000)
			startEpoch = parseAlphaNumTime(nowTs, val, defValue)
		default:
			startEpoch = nowTs - (15 * 60 * 1000)
		}
	}

	endE, ok := jsonSource["endEpoch"]
	if !ok || endE == nil {
		endEpoch = nowTs
	} else {
		switch val := endE.(type) {
		case json.Number:
			temp, _ := val.Int64()
			endEpoch = uint64(temp)
		case float64:
			endEpoch = uint64(val)
		case int64:
			endEpoch = uint64(val)
		case uint64:
			endEpoch = val
		case string:
			endEpoch = parseAlphaNumTime(nowTs, val, nowTs)
		default:
			endEpoch = nowTs
		}
	}

	size, ok := jsonSource["size"]
	if !ok || size == 0 {
		finalSize = uint64(100)
	} else {
		switch val := size.(type) {
		case json.Number:
			temp, _ := val.Int64()
			finalSize = uint64(temp)
		case float64:
			finalSize = uint64(val)
		case int64:
			finalSize = uint64(val)
		case uint64:
			finalSize = val
		case int32:
			finalSize = uint64(val)
		case uint32:
			finalSize = uint64(val)
		case int16:
			finalSize = uint64(val)
		case uint16:
			finalSize = uint64(val)
		case int8:
			finalSize = uint64(val)
		case uint8:
			finalSize = uint64(val)
		case int:
			finalSize = uint64(val)
		default:
			finalSize = uint64(100)
		}
	}

	scroll, ok := jsonSource["from"]
	if !ok || scroll == nil {
		scrollFrom = 0
	} else {
		switch val := scroll.(type) {
		case json.Number:
			temp, _ := val.Int64()
			scrollFrom = int(temp)
		case float64:
			scrollFrom = int(val)
		case int64:
			scrollFrom = int(val)
		case uint64:
			scrollFrom = int(val)
		case int32:
			scrollFrom = int(val)
		case uint32:
			scrollFrom = int(val)
		case int16:
			scrollFrom = int(val)
		case uint16:
			scrollFrom = int(val)
		case int8:
			scrollFrom = int(val)
		case uint8:
			scrollFrom = int(val)
		case int:
			scrollFrom = val
		default:
			log.Infof("ParseSearchBody: unknown type for scroll=%T", val)
			scrollFrom = 0
		}
	}
	finalSize = finalSize + uint64(scrollFrom)

	return searchText, startEpoch, endEpoch, finalSize, indexName, scrollFrom
}
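
// The sketch below is illustrative only (not part of the original file): it
// shows how a decoded request body flows through ParseSearchBody. It assumes
// the body was decoded with UseNumber(), as the handlers in this file do, so
// numeric fields arrive as json.Number values.
func exampleParseSearchBody() {
	body := map[string]interface{}{
		"searchText": "error",
		"startEpoch": "now-15m", // relative times are resolved by parseAlphaNumTime
		"endEpoch":   "now",
		"indexName":  []interface{}{"ind-0", "ind-1"}, // joined into "ind-0,ind-1"
		"size":       json.Number("1000"),
		"from":       json.Number("50"),
	}
	nowTs := utils.GetCurrentTimeInMs()
	searchText, startEpoch, endEpoch, finalSize, indexName, scrollFrom := ParseSearchBody(body, nowTs)
	// finalSize is size+from (1050 here); missing fields fall back to defaults
	// ("*" for searchText/indexName, the last 15 minutes for the time range, 100 for size).
	log.Infof("text=%v start=%v end=%v size=%v index=%v from=%v",
		searchText, startEpoch, endEpoch, finalSize, indexName, scrollFrom)
}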

func ProcessAlertsPipeSearchRequest(queryParams alertutils.QueryParams) int {

	queryData := fmt.Sprintf(`{
		"from": "0",
		"indexName": "*",
		"queryLanguage": "%s",
		"searchText": "%s",
		"startEpoch": "%s",
		"endEpoch" : "%s",
		"state": "query"
	}`, queryParams.QueryLanguage, utils.EscapeQuotes(queryParams.QueryText), queryParams.StartTime, queryParams.EndTime)
	orgid := uint64(0)
	dbPanelId := "-1"
	queryStart := time.Now()

	rawJSON := []byte(queryData)
	if rawJSON == nil {
		log.Errorf("ALERTSERVICE: ProcessAlertsPipeSearchRequest: received empty search request body")
		return -1
	}

	qid := rutils.GetNextQid()
	readJSON := make(map[string]interface{})
	var jsonc = jsoniter.ConfigCompatibleWithStandardLibrary
	decoder := jsonc.NewDecoder(bytes.NewReader(rawJSON))
	decoder.UseNumber()
	err := decoder.Decode(&readJSON)
	if err != nil {
		log.Errorf("qid=%v, ALERTSERVICE: ProcessAlertsPipeSearchRequest: failed to decode search request body! Err=%+v", qid, err)
	}

	nowTs := utils.GetCurrentTimeInMs()
	searchText, startEpoch, endEpoch, sizeLimit, indexNameIn, scrollFrom := ParseSearchBody(readJSON, nowTs)

	if scrollFrom > 10_000 {
		return -1
	}

	ti := structs.InitTableInfo(indexNameIn, orgid, false)
	log.Infof("qid=%v, ALERTSERVICE: ProcessAlertsPipeSearchRequest: index=[%s], searchString=[%v]",
		qid, ti.String(), searchText)

	queryLanguageType := readJSON["queryLanguage"]
	var simpleNode *structs.ASTNode
	var aggs *structs.QueryAggregators
	if queryLanguageType == "Pipe QL" {
		simpleNode, aggs, err = ParseRequest(searchText, startEpoch, endEpoch, qid, "Pipe QL", indexNameIn)
		if err != nil {
			log.Errorf("qid=%v, ALERTSERVICE: ProcessAlertsPipeSearchRequest: Error parsing query err=%+v", qid, err)
			return -1
		}

		if aggs != nil && (aggs.GroupByRequest != nil || aggs.MeasureOperations != nil) {
			sizeLimit = 0
		} else if aggs.HasDedupBlockInChain() || aggs.HasSortBlockInChain() || aggs.HasRexBlockInChainWithStats() {
			// 1. Dedup needs to see all the matched records before it can return any
			// of them when there's a sortby option.
			// 2. If there's a Rex block in the chain followed by a Stats block, we need to
			// see all the matched records before we apply or calculate the stats.
			sizeLimit = math.MaxUint64
		}
		qc := structs.InitQueryContextWithTableInfo(ti, sizeLimit, scrollFrom, orgid, false)
		result := segment.ExecuteQuery(simpleNode, aggs, qid, qc)
		httpRespOuter := getQueryResponseJson(result, indexNameIn, queryStart, sizeLimit, qid, aggs, result.TotalRRCCount, dbPanelId)

		if httpRespOuter.MeasureResults != nil && len(httpRespOuter.MeasureResults) > 0 && httpRespOuter.MeasureResults[0].MeasureVal != nil {
			measureVal, ok := httpRespOuter.MeasureResults[0].MeasureVal[queryParams.QueryText].(string)
			if ok {
				measureVal = strings.ReplaceAll(measureVal, ",", "")

				measureNum, err := strconv.Atoi(measureVal)
				if err != nil {
					log.Errorf("ALERTSERVICE: ProcessAlertsPipeSearchRequest: Error parsing int from a string: %s", err)
					return -1
				}
				return measureNum
			}
		}
	} else if queryLanguageType == "Splunk QL" {
		simpleNode, aggs, err = ParseRequest(searchText, startEpoch, endEpoch, qid, "Splunk QL", indexNameIn)
		if err != nil {
			log.Errorf("qid=%v, ALERTSERVICE: ProcessAlertsPipeSearchRequest: Error parsing query err=%+v", qid, err)
			return -1
		}

		if aggs != nil && (aggs.GroupByRequest != nil || aggs.MeasureOperations != nil) {
			sizeLimit = 0
		}
		qc := structs.InitQueryContextWithTableInfo(ti, sizeLimit, scrollFrom, orgid, false)
		result := segment.ExecuteQuery(simpleNode, aggs, qid, qc)
		httpRespOuter := getQueryResponseJson(result, indexNameIn, queryStart, sizeLimit, qid, aggs, result.TotalRRCCount, dbPanelId)
		if httpRespOuter.MeasureResults != nil && len(httpRespOuter.MeasureResults) > 0 && httpRespOuter.MeasureResults[0].MeasureVal != nil {
			measureVal, ok := httpRespOuter.MeasureResults[0].MeasureVal[httpRespOuter.MeasureFunctions[0]].(string)
			if ok {
				measureVal = strings.ReplaceAll(measureVal, ",", "")
				measureNum, err := strconv.ParseFloat(measureVal, 64)
				if err != nil {
					log.Errorf("ALERTSERVICE: ProcessAlertsPipeSearchRequest: Error parsing float from a string: %s", err)
					return -1
				}
				return int(measureNum)
			}
		}
	} else {
		log.Infof("ProcessAlertsPipeSearchRequest: unknown queryLanguageType: %v;", queryLanguageType)
	}

	return -1
}
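
// Illustrative sketch (not part of the original file): how an alert evaluation
// might invoke ProcessAlertsPipeSearchRequest. The alertutils.QueryParams field
// names are taken from their use in the function above; the query text and the
// relative time strings are hypothetical example values.
func exampleAlertQuery() int {
	params := alertutils.QueryParams{
		QueryLanguage: "Splunk QL",
		QueryText:     "city=Boston | stats count AS Count BY weekday",
		StartTime:     "now-1h",
		EndTime:       "now",
	}
	// Returns the first measure value as an int, or -1 on any failure.
	return ProcessAlertsPipeSearchRequest(params)
}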

func ProcessPipeSearchRequest(ctx *fasthttp.RequestCtx, myid uint64) {
	defer utils.DeferableAddAccessLogEntry(
		time.Now(),
		func() time.Time { return time.Now() },
		"No-user", // TODO : Add logged in user when user auth is implemented
		ctx.Request.URI().String(),
		string(ctx.PostBody()),
		func() int { return ctx.Response.StatusCode() },
		false,
		"access.log",
	)

	dbPanelId := utils.ExtractParamAsString(ctx.UserValue("dbPanel-id"))
	queryStart := time.Now()
	rawJSON := ctx.PostBody()
	if rawJSON == nil {
		log.Errorf("ProcessPipeSearchRequest: received empty search request body")
		utils.SetBadMsg(ctx, "")
		return
	}
	qid := rutils.GetNextQid()

	readJSON := make(map[string]interface{})
	var jsonc = jsoniter.ConfigCompatibleWithStandardLibrary
	decoder := jsonc.NewDecoder(bytes.NewReader(rawJSON))
	decoder.UseNumber()
	err := decoder.Decode(&readJSON)
	if err != nil {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		if _, wErr := ctx.WriteString(err.Error()); wErr != nil {
			log.Errorf("qid=%v, ProcessPipeSearchRequest: could not write error message err=%v", qid, wErr)
		}
		log.Errorf("qid=%v, ProcessPipeSearchRequest: failed to decode search request body! Err=%+v", qid, err)
		return
	}

	nowTs := utils.GetCurrentTimeInMs()
	searchText, startEpoch, endEpoch, sizeLimit, indexNameIn, scrollFrom := ParseSearchBody(readJSON, nowTs)

	if scrollFrom > 10_000 {
		processMaxScrollCount(ctx, qid)
		return
	}

	ti := structs.InitTableInfo(indexNameIn, myid, false)
	log.Infof("qid=%v, ProcessPipeSearchRequest: index=[%s], searchString=[%v]",
		qid, ti.String(), searchText)

	queryLanguageType := readJSON["queryLanguage"]
	var simpleNode *structs.ASTNode
	var aggs *structs.QueryAggregators
	if queryLanguageType == "SQL" {
		simpleNode, aggs, err = ParseRequest(searchText, startEpoch, endEpoch, qid, "SQL", indexNameIn)
	} else if queryLanguageType == "Pipe QL" {
		simpleNode, aggs, err = ParseRequest(searchText, startEpoch, endEpoch, qid, "Pipe QL", indexNameIn)
	} else if queryLanguageType == "Log QL" {
		simpleNode, aggs, err = ParseRequest(searchText, startEpoch, endEpoch, qid, "Log QL", indexNameIn)
	} else if queryLanguageType == "Splunk QL" {
		simpleNode, aggs, err = ParseRequest(searchText, startEpoch, endEpoch, qid, "Splunk QL", indexNameIn)
	} else {
		log.Infof("ProcessPipeSearchRequest: unknown queryLanguageType: %v; using Pipe QL instead", queryLanguageType)
		simpleNode, aggs, err = ParseRequest(searchText, startEpoch, endEpoch, qid, "Pipe QL", indexNameIn)
	}

	if err != nil {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		if _, wErr := ctx.WriteString(err.Error()); wErr != nil {
			log.Errorf("qid=%v, ProcessPipeSearchRequest: could not write error message err=%v", qid, wErr)
		}
		log.Errorf("qid=%v, ProcessPipeSearchRequest: Error parsing query err=%+v", qid, err)
		return
	}

	if aggs != nil && (aggs.GroupByRequest != nil || aggs.MeasureOperations != nil) {
		sizeLimit = 0
	} else if aggs.HasDedupBlockInChain() {
		// Dedup needs to see all the matched records before it can return any
		// of them when there's a sortby option.
		sizeLimit = math.MaxUint64
	} else if aggs.HasRexBlockInChainWithStats() {
		// If there's a Rex block in the chain followed by a Stats block, we need to
		// see all the matched records before we apply or calculate the stats.
		sizeLimit = math.MaxUint64
	}

	// If MaxRows is used to limit the number of returned results, set `sizeLimit`
	// to it. Currently MaxRows is only valid as the root QueryAggregators.
	if aggs != nil && aggs.Limit != 0 {
		sizeLimit = uint64(aggs.Limit)
	}
	if queryLanguageType == "SQL" && aggs != nil && aggs.TableName != "*" {
		indexNameIn = aggs.TableName
		ti = structs.InitTableInfo(indexNameIn, myid, false) // Re-initialize ti with the updated indexNameIn
	}

	qc := structs.InitQueryContextWithTableInfo(ti, sizeLimit, scrollFrom, myid, false)
	result := segment.ExecuteQuery(simpleNode, aggs, qid, qc)
	httpRespOuter := getQueryResponseJson(result, indexNameIn, queryStart, sizeLimit, qid, aggs, result.TotalRRCCount, dbPanelId)
	utils.WriteJsonResponse(ctx, httpRespOuter)

	ctx.SetStatusCode(fasthttp.StatusOK)
}
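
// Illustrative sketch (not part of the original file): a minimal way to expose
// ProcessPipeSearchRequest over HTTP with fasthttp. SigLens wires this handler
// through its own server/router setup; the address, the route path, and the
// fixed org id (0) below are assumptions for the sketch only.
func exampleServePipeSearch() error {
	handler := func(ctx *fasthttp.RequestCtx) {
		if string(ctx.Path()) == "/api/search" { // hypothetical route
			ProcessPipeSearchRequest(ctx, 0) // 0 = default org id in this sketch
			return
		}
		ctx.SetStatusCode(fasthttp.StatusNotFound)
	}
	return fasthttp.ListenAndServe(":5122", handler)
}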

func getQueryResponseJson(nodeResult *structs.NodeResult, indexName string, queryStart time.Time, sizeLimit uint64, qid uint64, aggs *structs.QueryAggregators, numRRCs uint64, dbPanelId string) PipeSearchResponseOuter {
	var httpRespOuter PipeSearchResponseOuter
	var httpResp PipeSearchResponse

	// If aggs exist, the histogram carries the aggregation results, so return
	// those instead of all raw results.
	httpRespOuter.Aggs = convertBucketToAggregationResponse(nodeResult.Histogram)
	if len(nodeResult.ErrList) > 0 {
		for _, err := range nodeResult.ErrList {
			httpRespOuter.Errors = append(httpRespOuter.Errors, err.Error())
		}
	}
	json, allCols, err := convertRRCsToJSONResponse(nodeResult.AllRecords, sizeLimit, qid, nodeResult.SegEncToKey, aggs)
	if err != nil {
		httpRespOuter.Errors = append(httpRespOuter.Errors, err.Error())
		return httpRespOuter
	}

	var canScrollMore bool
	if numRRCs == sizeLimit {
		// If the number of RRCs is exactly equal to the requested size, there may
		// be more than `size` matches, so the client can scroll further.
		canScrollMore = true
	}
	httpResp.Hits = json
	httpResp.TotalMatched = convertQueryCountToTotalResponse(nodeResult.TotalResults)
	httpRespOuter.Hits = httpResp
	httpRespOuter.AllPossibleColumns = allCols
	httpRespOuter.ElapedTimeMS = time.Since(queryStart).Milliseconds()
	httpRespOuter.Qtype = nodeResult.Qtype
	httpRespOuter.CanScrollMore = canScrollMore
	httpRespOuter.TotalRRCCount = numRRCs
	httpRespOuter.MeasureFunctions = nodeResult.MeasureFunctions
	httpRespOuter.MeasureResults = nodeResult.MeasureResults
	httpRespOuter.GroupByCols = nodeResult.GroupByCols
	httpRespOuter.BucketCount = nodeResult.BucketCount
	httpRespOuter.DashboardPanelId = dbPanelId

	log.Infof("qid=%d, Query Took %+v ms", qid, httpRespOuter.ElapedTimeMS)

	return httpRespOuter
}

// convertRRCsToJSONResponse returns the converted JSON records, all columns, and any error.
func convertRRCsToJSONResponse(rrcs []*sutils.RecordResultContainer, sizeLimit uint64,
	qid uint64, segencmap map[uint16]string, aggs *structs.QueryAggregators) ([]map[string]interface{}, []string, error) {

	hits := make([]map[string]interface{}, 0)
	if sizeLimit == 0 || len(rrcs) == 0 {
		return hits, []string{}, nil
	}

	allJsons, allCols, err := record.GetJsonFromAllRrc(rrcs, false, qid, segencmap, aggs)
	if err != nil {
		log.Errorf("qid=%d, convertRRCsToJSONResponse: failed to get allrecords from rrc, err=%v", qid, err)
		return allJsons, allCols, err
	}

	if sizeLimit < uint64(len(allJsons)) {
		allJsons = allJsons[:sizeLimit]
	}
	return allJsons, allCols, nil
}

func convertBucketToAggregationResponse(buckets map[string]*structs.AggregationResult) map[string]AggregationResults {
	resp := make(map[string]AggregationResults)
	for aggName, aggRes := range buckets {
		allBuckets := make([]map[string]interface{}, len(aggRes.Results))

		for idx, hist := range aggRes.Results {
			res := make(map[string]interface{})
			var bucketKey interface{}
			bucketKeyList, ok := hist.BucketKey.([]string)
			if ok && len(bucketKeyList) == 1 {
				bucketKey = bucketKeyList[0]
			} else {
				bucketKey = hist.BucketKey
			}
			res["key"] = bucketKey
			res["doc_count"] = hist.ElemCount
			if aggRes.IsDateHistogram {
				res["key_as_string"] = fmt.Sprintf("%v", hist.BucketKey)
			}
			for name, value := range hist.StatRes {
				res[name] = utils.StatResponse{
					Value: value.CVal,
				}
			}

			allBuckets[idx] = res
		}
		resp[aggName] = AggregationResults{Buckets: allBuckets}
	}
	return resp
}

func convertQueryCountToTotalResponse(qc *structs.QueryCount) interface{} {
	if !qc.EarlyExit {
		return qc.TotalCount
	}

	return utils.HitsCount{Value: qc.TotalCount, Relation: qc.Op.ToString()}
}

/*
parseAlphaNumTime supports "now-[Num][Unit]":

	Num  ==> any positive integer
	Unit ==> m (minutes), h (hours), d (days)
*/
func parseAlphaNumTime(nowTs uint64, inp string, defValue uint64) uint64 {
	sanTime := strings.ReplaceAll(inp, " ", "")

	if sanTime == "now" {
		return nowTs
	}

	retVal := defValue

	strln := len(sanTime)
	if strln < 6 {
		return retVal
	}

	unit := sanTime[strln-1]
	num, err := strconv.ParseInt(sanTime[4:strln-1], 0, 64)
	if err != nil {
		return defValue
	}

	switch unit {
	case 'm':
		retVal = nowTs - MIN_IN_MS*uint64(num)
	case 'h':
		retVal = nowTs - HOUR_IN_MS*uint64(num)
	case 'd':
		retVal = nowTs - DAY_IN_MS*uint64(num)
	}
	return retVal
}
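
// Illustrative sketch (not part of the original file): how the relative-time
// syntax above resolves against a fixed "now". The timestamp is an arbitrary
// example value in milliseconds.
func exampleParseAlphaNumTime() {
	nowTs := uint64(1656717613300)

	start := parseAlphaNumTime(nowTs, "now-15m", nowTs) // nowTs - 15*MIN_IN_MS
	end := parseAlphaNumTime(nowTs, "now", nowTs)       // "now" returns nowTs unchanged
	bad := parseAlphaNumTime(nowTs, "yesterday", 0)     // unparseable input falls back to the default (0 here)

	log.Infof("start=%v end=%v bad=%v", start, end, bad)
}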

func GetAutoCompleteData(ctx *fasthttp.RequestCtx, myid uint64) {
	var resp utils.AutoCompleteDataInfo
	allVirtualTableNames, err := vtable.GetVirtualTableNames(myid)
	if err != nil {
		log.Errorf("GetAutoCompleteData: failed to get all virtual table names, err=%v", err)
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
	}

	sortedIndices := make([]string, 0, len(allVirtualTableNames))

	for k := range allVirtualTableNames {
		sortedIndices = append(sortedIndices, k)
	}

	sort.Strings(sortedIndices)

	for _, indexName := range sortedIndices {
		if indexName == "" {
			log.Errorf("GetAutoCompleteData: skipping an empty index name indexName=%v", indexName)
			continue
		}
	}

	resp.ColumnNames = metadata.GetAllColNames(sortedIndices)
	resp.MeasureFunctions = []string{"min", "max", "avg", "count", "sum", "cardinality"}
	utils.WriteJsonResponse(ctx, resp)
	ctx.SetStatusCode(fasthttp.StatusOK)
}

func processMaxScrollCount(ctx *fasthttp.RequestCtx, qid uint64) {
	resp := &PipeSearchResponseOuter{
		CanScrollMore: false,
	}
	qType := query.GetQueryType(qid)
	resp.Qtype = qType.String()
	utils.WriteJsonResponse(ctx, resp)
	ctx.SetStatusCode(fasthttp.StatusOK)
}