github.com/siglens/siglens@v0.0.0-20240328180423-f7ce9ae441ed/pkg/segment/search/segsearch.go

/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package search

import (
	"errors"
	"fmt"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	blob "github.com/siglens/siglens/pkg/blob"
	dtu "github.com/siglens/siglens/pkg/common/dtypeutils"
	"github.com/siglens/siglens/pkg/config"
	"github.com/siglens/siglens/pkg/querytracker"
	"github.com/siglens/siglens/pkg/segment/memory/limit"
	"github.com/siglens/siglens/pkg/segment/pqmr"
	"github.com/siglens/siglens/pkg/segment/query/pqs"
	pqsmeta "github.com/siglens/siglens/pkg/segment/query/pqs/meta"
	"github.com/siglens/siglens/pkg/segment/query/summary"
	"github.com/siglens/siglens/pkg/segment/reader/microreader"
	"github.com/siglens/siglens/pkg/segment/reader/segread"
	"github.com/siglens/siglens/pkg/segment/results/blockresults"
	"github.com/siglens/siglens/pkg/segment/results/segresults"
	"github.com/siglens/siglens/pkg/segment/structs"
	"github.com/siglens/siglens/pkg/segment/utils"
	"github.com/siglens/siglens/pkg/segment/writer"
	"github.com/siglens/siglens/pkg/utils/semaphore"
	log "github.com/sirupsen/logrus"
)

var numConcurrentRawSearch *semaphore.WeightedSemaphore

func init() {
	// We may want to increase this to GOMAXPROCS; but testing on a 32-vCPU server
	// with this set to GOMAXPROCS sometimes led to all threads waiting on the
	// GLOBAL_FD_LIMITER semaphore, so all progress stopped.
	// With GOMAXPROCS / 2 we still get most of the benefit because this server is
	// also doing other things.
	max := runtime.GOMAXPROCS(0) / 2
	if max < 1 {
		max = 1
	}

	numConcurrentRawSearch = semaphore.NewWeightedSemaphore(int64(max), "rawsearch.limiter", time.Minute)
}

const BLOCK_BATCH_SIZE = 100

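// RawSearchSegmentFileWrapper acquires the raw-search concurrency and memory
// limits, then raw searches the segment. Queries with a time-histogram or
// group-by aggregation search all blocks in one pass; other queries search
// the blocks in batches of BLOCK_BATCH_SIZE, ordered by block number
// according to the requested sort direction.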
func RawSearchSegmentFileWrapper(req *structs.SegmentSearchRequest, parallelismPerFile int64,
	searchNode *structs.SearchNode, timeRange *dtu.TimeRange, sizeLimit uint64, aggs *structs.QueryAggregators,
	allSearchResults *segresults.SearchResults, qid uint64, qs *summary.QuerySummary) {
	err := numConcurrentRawSearch.TryAcquireWithBackoff(1, 5, fmt.Sprintf("qid.%d", qid))
	if err != nil {
		log.Errorf("qid=%d, Failed to acquire resources for raw search! Error: %+v", qid, err)
		allSearchResults.AddError(err)
		return
	}
	defer numConcurrentRawSearch.Release(1)
	searchMemory := req.GetMaxSearchMemorySize(searchNode, parallelismPerFile, PQMR_INITIAL_SIZE)
	err = limit.RequestSearchMemory(searchMemory)
	if err != nil {
		log.Errorf("qid=%d, Failed to acquire memory from global pool for search! Error: %v", qid, err)
		allSearchResults.AddError(err)
		return
	}
	loadMetadataForSearchRequest(req, qid)

	// Only chunk when the query has no aggs; else, raw search with no chunks.
	shouldChunk := false
	if aggs == nil || (aggs.TimeHistogram == nil && aggs.GroupByRequest == nil) {
		shouldChunk = true
	}

	if !shouldChunk {
		rawSearchColumnar(req, searchNode, timeRange, sizeLimit, aggs, parallelismPerFile, allSearchResults, qid, qs)
		return
	}
	// If not match_all, then do the search in N chunks of blocks.
	sortedAllBlks := make([]*structs.BlockMetadataHolder, len(req.AllBlocksToSearch))
	var i int
	for _, bmh := range req.AllBlocksToSearch {
		if bmh == nil {
			continue
		}
		sortedAllBlks[i] = bmh
		i++
	}
	sortedAllBlks = sortedAllBlks[:i]
	if aggs != nil && aggs.Sort != nil && aggs.Sort.Ascending {
		sort.Slice(sortedAllBlks, func(i, j int) bool { return sortedAllBlks[i].BlkNum < sortedAllBlks[j].BlkNum })
	} else {
		sort.Slice(sortedAllBlks, func(i, j int) bool { return sortedAllBlks[i].BlkNum > sortedAllBlks[j].BlkNum })
	}
	for i := 0; i < len(sortedAllBlks); {
		nm := make(map[uint16]*structs.BlockMetadataHolder, BLOCK_BATCH_SIZE)
		for j := 0; j < BLOCK_BATCH_SIZE && i < len(sortedAllBlks); {
			nm[sortedAllBlks[i].BlkNum] = sortedAllBlks[i]
			j++
			i++
		}
		req.AllBlocksToSearch = nm
		rawSearchColumnar(req, searchNode, timeRange, sizeLimit, aggs, parallelismPerFile, allSearchResults, qid, qs)
	}
}

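// writePqmrFiles serializes the per-block match results, along with empty
// bitsets for any cmiPassedCnames blocks that have no search status, into one
// buffer, flushes it to the segment's .pqmr file, registers the persistent
// query result, and uploads the ingest node directory and the pqmr file to
// blob storage.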
func writePqmrFiles(segmentSearchRecords *SegmentSearchStatus, segmentKey string,
	virtualTableName string, qid uint64, pqid string, latestEpochMS uint64, cmiPassedCnames map[uint16]map[string]bool) error {
	pqidFname := fmt.Sprintf("%v/pqmr/%v.pqmr", segmentKey, pqid)
	reqLen := uint64(0)
	allPqmrFile := make([]string, 0)
	// Calculate the required size for the buffer that we will write to disk.
	for _, blkSearchResult := range segmentSearchRecords.AllBlockStatus {

		// Add 2 bytes for blockNum and 2 bytes for blockLen.
		size := 4 + blkSearchResult.allRecords.GetInMemSize()
		reqLen += size
	}

	var idxEmpty uint32
	emptyBitset := pqmr.CreatePQMatchResults(0)
	bufEmpty := make([]byte, (4+emptyBitset.GetInMemSize())*uint64(len(cmiPassedCnames)))
	for blockNum := range cmiPassedCnames {
		if _, ok := segmentSearchRecords.AllBlockStatus[blockNum]; !ok {
			packedLen, err := emptyBitset.EncodePqmr(bufEmpty[idxEmpty:], blockNum)
			if err != nil {
				log.Errorf("qid=%d, writePqmrFiles: failed to encode pqmr. Err: %v", qid, err)
				return err
			}
			idxEmpty += uint32(packedLen)
		}
	}

	// Create a buffer of the required length.
	buf := make([]byte, reqLen)
	var idx uint32
	for blockNum, blkSearchResult := range segmentSearchRecords.AllBlockStatus {
		packedLen, err := blkSearchResult.allRecords.EncodePqmr(buf[idx:], blockNum)
		if err != nil {
			log.Errorf("qid=%d, writePqmrFiles: failed to encode pqmr. Err: %v", qid, err)
			return err
		}
		idx += uint32(packedLen)
	}
	// Only idxEmpty bytes of bufEmpty were filled in; appending the whole
	// (possibly larger) buffer would write stray zero bytes to the file.
	sizeToAdd := idxEmpty
	if sizeToAdd > 0 {
		newArr := make([]byte, sizeToAdd)
		buf = append(buf, newArr...)
	}
	copy(buf[idx:], bufEmpty[:idxEmpty])
	idx += sizeToAdd
	err := pqmr.WritePqmrToDisk(buf[0:idx], pqidFname)
	if err != nil {
		log.Errorf("qid=%d, writePqmrFiles: failed to flush pqmr results to fname %s. Err: %v", qid, pqidFname, err)
		return err
	}
	writer.BackFillPQSSegmetaEntry(segmentKey, pqid)
	pqs.AddPersistentQueryResult(segmentKey, virtualTableName, pqid)
	allPqmrFile = append(allPqmrFile, pqidFname)
	err = blob.UploadIngestNodeDir()
	if err != nil {
		log.Errorf("qid=%d, writePqmrFiles: failed to upload ingest node directory! Err: %v", qid, err)
	}
	err = blob.UploadSegmentFiles(allPqmrFile)
	if err != nil {
		log.Errorf("qid=%d, writePqmrFiles: failed to upload backfilled pqmr file! Err: %v", qid, err)
	}
	return nil
}

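// rawSearchColumnar validates the request, raw searches the qualifying blocks
// of one segment, applies any aggregations, and records the metrics in the
// query summary. If the query qualifies for PQS backfilling, it also spawns a
// goroutine to persist the pqmr results (or an empty-results pqmeta entry).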
func rawSearchColumnar(searchReq *structs.SegmentSearchRequest, searchNode *structs.SearchNode, timeRange *dtu.TimeRange,
	sizeLimit uint64, aggs *structs.QueryAggregators, fileParallelism int64, allSearchResults *segresults.SearchResults, qid uint64,
	querySummary *summary.QuerySummary) {
	if fileParallelism <= 0 {
		log.Errorf("qid=%d, rawSearchColumnar: invalid fileParallelism of %d - must be > 0", qid, fileParallelism)
		allSearchResults.AddError(errors.New("invalid fileParallelism - must be > 0"))
		return
	} else if searchReq == nil {
		log.Errorf("qid=%d, rawSearchColumnar: received a nil search request", qid)
		allSearchResults.AddError(errors.New("nil search request"))
		return
	} else if searchReq.SearchMetadata == nil {
		log.Errorf("qid=%d, rawSearchColumnar: search metadata not provided for %s", qid, searchReq.SegmentKey)
		allSearchResults.AddError(errors.New("search metadata not provided"))
		return
	}

	blockSummaries := searchReq.SearchMetadata.BlockSummaries
	if blockSummaries == nil {
		log.Errorf("qid=%d, rawSearchColumnar: received empty block summaries for %s", qid, searchReq.SegmentKey)
		allSearchResults.AddError(errors.New("block summaries not provided"))
		return
	}

	sTime := time.Now()

	queryMetrics := &structs.QueryProcessingMetrics{}
	searchNode.AddQueryInfoForNode()

	segmentSearchRecords := InitBlocksToSearch(searchReq, blockSummaries, allSearchResults, timeRange)
	queryMetrics.SetNumBlocksToRawSearch(uint64(segmentSearchRecords.numBlocksToSearch))
	queryMetrics.SetNumBlocksInSegFile(uint64(segmentSearchRecords.numBlocksInSegFile))
	numBlockFilteredRecords, _ := segmentSearchRecords.getTotalCounts()
	queryMetrics.SetNumRecordsToRawSearch(numBlockFilteredRecords)

	if len(segmentSearchRecords.AllBlockStatus) == 0 {
		log.Debugf("qid=%d, rawSearchColumnar: no blocks to search for %s", qid, searchReq.SegmentKey)
		return
	}
	allBlockSearchHelpers := structs.InitAllBlockSearchHelpers(fileParallelism)
	executeRawSearchOnNode(searchNode, searchReq, segmentSearchRecords, allBlockSearchHelpers, queryMetrics,
		qid, allSearchResults)
	err := applyAggregationsToResult(aggs, segmentSearchRecords, searchReq, blockSummaries, timeRange,
		sizeLimit, fileParallelism, queryMetrics, qid, allSearchResults)
	if err != nil {
		log.Errorf("qid=%d, rawSearchColumnar: failed to apply aggregations to result for segKey %+v. Error: %v", qid, searchReq.SegmentKey, err)
		allSearchResults.AddError(err)
		return
	}

	finalMatched, finalUnmatched := segmentSearchRecords.getTotalCounts()
	segmentSearchRecords.Close()
	queryMetrics.SetNumRecordsMatched(finalMatched)
	queryMetrics.SetNumRecordsUnmatched(finalUnmatched)

	if finalMatched > 0 {
		searchReq.HasMatchedRrc = true
	}

	timeElapsed := time.Since(sTime)
	querySummary.UpdateSummary(summary.RAW, timeElapsed, queryMetrics)

	if pqid, ok := shouldBackFillPQMR(searchNode, searchReq, qid); ok {
		if finalMatched == 0 {
			go writeEmptyPqmetaFilesWrapper(pqid, searchReq.SegmentKey, searchReq.VirtualTableName)
		} else {
			go writePqmrFilesWrapper(segmentSearchRecords, searchReq, qid, pqid)
		}
	}
}

func writeEmptyPqmetaFilesWrapper(pqid string, segKey string, vTableName string) {
	pqsmeta.AddEmptyResults(pqid, segKey, vTableName)
	writer.BackFillPQSSegmetaEntry(segKey, pqid)
}

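// shouldBackFillPQMR returns the query's pqid and true when its pqmr results
// should be backfilled: PQS must be enabled, the query must be tracked as
// persistent, and the request must be a raw search that is not match-all.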
func shouldBackFillPQMR(searchNode *structs.SearchNode, searchReq *structs.SegmentSearchRequest, qid uint64) (string, bool) {
	if config.IsPQSEnabled() {
		pqid := querytracker.GetHashForQuery(searchNode)

		ok, err := querytracker.IsQueryPersistent([]string{searchReq.VirtualTableName}, searchNode)
		if err != nil {
			log.Errorf("qid=%d, Failed to check if query is persistent. Error: %v", qid, err)
			return "", false
		}
		if ok && searchReq.SType == structs.RAW_SEARCH && searchNode.NodeType != structs.MatchAllQuery {
			return pqid, true
		}
	}
	return "", false
}

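// writePqmrFilesWrapper skips segments that are still active, as their match
// results may still change, and writes pqmr files only for segments owned by
// this host (i.e. whose segment key contains this host's ID).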
func writePqmrFilesWrapper(segmentSearchRecords *SegmentSearchStatus, searchReq *structs.SegmentSearchRequest, qid uint64, pqid string) {
	if strings.Contains(searchReq.SegmentKey, "/active/") {
		return
	}
	if strings.Contains(searchReq.SegmentKey, config.GetHostID()) {
		err := writePqmrFiles(segmentSearchRecords, searchReq.SegmentKey, searchReq.VirtualTableName, qid, pqid, searchReq.LatestEpochMS, searchReq.CmiPassedCnames)
		if err != nil {
			log.Errorf("qid=%d, Failed to write pqmr file. Error: %v", qid, err)
		}
	}
}

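// RawSearchPQMResults searches a segment using previously persisted pqmr
// results instead of re-running the filter. It loads the block timestamps and
// the shared column readers, then fans the blocks out over
// filterBlockRequestsChan to fileParallelism workers, ordered by the
// requested sort direction.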
func RawSearchPQMResults(req *structs.SegmentSearchRequest, fileParallelism int64, timeRange *dtu.TimeRange, aggs *structs.QueryAggregators,
	sizeLimit uint64, spqmr *pqmr.SegmentPQMRResults, allSearchResults *segresults.SearchResults, qid uint64, querySummary *summary.QuerySummary) {
	sTime := time.Now()

	err := numConcurrentRawSearch.TryAcquireWithBackoff(1, 5, fmt.Sprintf("qid.%d", qid))
	if err != nil {
		log.Errorf("qid=%d, Failed to acquire resources for pqs search! Error: %+v", qid, err)
		allSearchResults.AddError(err)
		return
	}
	defer numConcurrentRawSearch.Release(1)

	allTimestamps, err := segread.ReadAllTimestampsForBlock(req.AllBlocksToSearch, req.SegmentKey,
		req.SearchMetadata.BlockSummaries, fileParallelism)
	if err != nil {
		allSearchResults.AddError(err)
		return
	}
	defer segread.ReturnTimeBuffers(allTimestamps)

	sharedReader, err := segread.InitSharedMultiColumnReaders(req.SegmentKey, req.AllPossibleColumns, req.AllBlocksToSearch,
		req.SearchMetadata.BlockSummaries, int(fileParallelism), qid)
	if err != nil {
		log.Errorf("qid=%v, RawSearchPQMResults: failed to load all column file readers for %s. Needed cols %+v. Err: %+v",
			qid, req.SegmentKey, req.AllPossibleColumns, err)
		allSearchResults.AddError(err)
		return
	}
	defer sharedReader.Close()

	queryMetrics := &structs.QueryProcessingMetrics{}
	runningBlockManagers := &sync.WaitGroup{}
	filterBlockRequestsChan := make(chan uint16, spqmr.GetNumBlocks())

	rupReader, err := segread.InitNewRollupReader(req.SegmentKey, config.GetTimeStampKey(), qid)
	if err != nil {
		log.Errorf("qid=%d, RawSearchPQMResults: failed to initialize rollup reader for segkey %s. Error: %v",
			qid, req.SegmentKey, err)
	} else {
		defer rupReader.Close()
	}
	allBlocksToXRollup, aggsHasTimeHt, _ := getRollupForAggregation(aggs, rupReader)
	for i := int64(0); i < fileParallelism; i++ {
		runningBlockManagers.Add(1)
		go rawSearchSingleSPQMR(sharedReader.MultiColReaders[i], req, aggs, runningBlockManagers,
			filterBlockRequestsChan, spqmr, allSearchResults, allTimestamps, timeRange, sizeLimit, queryMetrics,
			allBlocksToXRollup, aggsHasTimeHt, qid)
	}

	sortedAllBlks := spqmr.GetAllBlocks()
	if aggs != nil && aggs.Sort != nil && aggs.Sort.Ascending {
		sort.Slice(sortedAllBlks, func(i, j int) bool { return sortedAllBlks[i] < sortedAllBlks[j] })
	} else {
		sort.Slice(sortedAllBlks, func(i, j int) bool { return sortedAllBlks[i] > sortedAllBlks[j] })
	}

	for _, blkNum := range sortedAllBlks {
		filterBlockRequestsChan <- blkNum
	}
	close(filterBlockRequestsChan)

	queryMetrics.SetNumBlocksInSegFile(uint64(spqmr.GetNumBlocks()))
	runningBlockManagers.Wait()

	timeElapsed := time.Since(sTime)
	querySummary.UpdateSummary(summary.PQS, timeElapsed, queryMetrics)
}

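// rawSearchSingleSPQMR is one worker for RawSearchPQMResults. For each block
// received on filterBlockRequestsChan it re-validates the persisted matches
// against the query's time range, collects sorted record results, applies
// time-bucket rollups and group-by aggregations, and finally merges its block
// results into allSearchResults.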
func rawSearchSingleSPQMR(multiReader *segread.MultiColSegmentReader, req *structs.SegmentSearchRequest, aggs *structs.QueryAggregators,
	runningWG *sync.WaitGroup, filterBlockRequestsChan chan uint16, spqmr *pqmr.SegmentPQMRResults, allSearchResults *segresults.SearchResults,
	allTimestamps map[uint16][]uint64, tRange *dtu.TimeRange, sizeLimit uint64, queryMetrics *structs.QueryProcessingMetrics,
	allBlocksToXRollup map[uint16]map[uint64]*writer.RolledRecs, aggsHasTimeHt bool, qid uint64) {
	defer runningWG.Done()

	blkResults, err := blockresults.InitBlockResults(sizeLimit, aggs, qid)
	if err != nil {
		log.Errorf("qid=%v, rawSearchSingleSPQMR: failed to initialize block results for %s. Err: %v",
			qid, req.SegmentKey, err)
		allSearchResults.AddError(err)
		return
	}
	measureInfo, internalMops := blkResults.GetConvertedMeasureInfo()
	for blockNum := range filterBlockRequestsChan {
		if req.SearchMetadata == nil || int(blockNum) >= len(req.SearchMetadata.BlockSummaries) {
			log.Errorf("qid=%d, rawSearchSingleSPQMR: unable to extract block summary for block %d, segkey=%v", qid, blockNum, req.SegmentKey)
			continue
		}
		blkSum := req.SearchMetadata.BlockSummaries[blockNum]
		if !tRange.CheckRangeOverLap(blkSum.LowTs, blkSum.HighTs) {
			continue
		}
		pqmr, found := spqmr.GetBlockResults(blockNum)
		if !found {
			log.Errorf("qid=%d, rawSearchSingleSPQMR: unable to get pqmr results for block %d, segkey=%v", qid, blockNum, req.SegmentKey)
			continue
		}

		numRecsInBlock := uint(blkSum.RecCount)
		currTS, ok := allTimestamps[blockNum]
		if !ok {
			log.Errorf("qid=%d, rawSearchSingleSPQMR: failed to get timestamps for block %d. Number of read ts blocks %+v, segkey=%v", qid, blockNum, len(allTimestamps), req.SegmentKey)
			continue
		}
		isBlkFullyEnclosed := tRange.AreTimesFullyEnclosed(blkSum.LowTs, blkSum.HighTs)
		if blkResults.ShouldIterateRecords(aggsHasTimeHt, isBlkFullyEnclosed, blkSum.LowTs, blkSum.HighTs, false) {
			for recNum := uint(0); recNum < numRecsInBlock; recNum++ {
				if pqmr.DoesRecordMatch(recNum) {
					if int(recNum) >= len(currTS) {
						log.Errorf("qid=%d, rawSearchSingleSPQMR: tried to get the ts for recNum %+v but only %+v records exist, segkey=%v", qid, recNum, len(currTS), req.SegmentKey)
						continue
					}
					recTs := currTS[recNum]
					if !tRange.CheckInRange(recTs) {
						pqmr.ClearBit(recNum)
						continue
					}
					convertedRecNum := uint16(recNum)
					if blkResults.ShouldAddMore() {
						sortVal, invalidCol := extractSortVals(aggs, multiReader, blockNum, convertedRecNum, recTs, qid)
						if !invalidCol && blkResults.WillValueBeAdded(sortVal) {
							rrc := &utils.RecordResultContainer{
								SegKeyInfo: utils.SegKeyInfo{
									SegKeyEnc: allSearchResults.GetAddSegEnc(req.SegmentKey),
									IsRemote:  false,
								},
								BlockNum:         blockNum,
								RecordNum:        convertedRecNum,
								SortColumnValue:  sortVal,
								VirtualTableName: req.VirtualTableName,
								TimeStamp:        recTs,
							}
							blkResults.Add(rrc)
						}
					}
				}
			}
		}

		toXRollup, ok := allBlocksToXRollup[blockNum]
		if aggsHasTimeHt && ok {
			for rupTskey, rr := range toXRollup {
				rr.MatchedRes.InPlaceIntersection(pqmr)
				matchedRrCount := uint16(rr.MatchedRes.GetNumberOfSetBits())
				blkResults.AddKeyToTimeBucket(rupTskey, matchedRrCount)
			}
		}
		if aggs != nil && aggs.GroupByRequest != nil {
			recIT := InitIteratorFromPQMR(pqmr, numRecsInBlock)
			addRecordToAggregations(aggs.GroupByRequest, aggs.TimeHistogram, measureInfo, len(internalMops),
				multiReader, blockNum, recIT, blkResults, qid)
		}
		numRecsMatched := uint64(pqmr.GetNumberOfSetBits())

		if numRecsMatched > 0 {
			req.HasMatchedRrc = true
		}

		blkResults.AddMatchedCount(numRecsMatched)
		queryMetrics.IncrementNumRecordsNoMatch(uint64(numRecsInBlock) - numRecsMatched)
		queryMetrics.IncrementNumRecordsWithMatch(numRecsMatched)
		queryMetrics.IncrementNumBlocksToRawSearch(1)
	}
	allSearchResults.AddBlockResults(blkResults)
}

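// executeRawSearchOnNode applies a search node's AND, OR, and exclusion
// conditions to the segment, updating segmentSearch with the matching records.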
func executeRawSearchOnNode(node *structs.SearchNode, searchReq *structs.SegmentSearchRequest, segmentSearch *SegmentSearchStatus,
	allBlockSearchHelpers []*structs.BlockSearchHelper, queryMetrics *structs.QueryProcessingMetrics,
	qid uint64, allSearchResults *segresults.SearchResults) {

	if node.AndSearchConditions != nil {
		applyRawSearchToConditions(node.AndSearchConditions, searchReq, segmentSearch, allBlockSearchHelpers,
			utils.And, queryMetrics, qid, allSearchResults)
	}

	if node.OrSearchConditions != nil {
		applyRawSearchToConditions(node.OrSearchConditions, searchReq, segmentSearch, allBlockSearchHelpers,
			utils.Or, queryMetrics, qid, allSearchResults)
	}

	if node.ExclusionSearchConditions != nil {
		applyRawSearchToConditions(node.ExclusionSearchConditions, searchReq, segmentSearch, allBlockSearchHelpers,
			utils.Exclusion, queryMetrics, qid, allSearchResults)
	}
}

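// applyRawSearchToConditions walks one side of a search condition: nested
// search nodes recurse through executeRawSearchOnNode, while leaf queries run
// through RawSearchSingleQuery with the given logical operator.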
func applyRawSearchToConditions(cond *structs.SearchCondition, searchReq *structs.SegmentSearchRequest, segmentSearch *SegmentSearchStatus,
	allBlockSearchHelpers []*structs.BlockSearchHelper, op utils.LogicalOperator, queryMetrics *structs.QueryProcessingMetrics, qid uint64,
	allSearchResults *segresults.SearchResults) {

	if cond.SearchNode != nil {
		for _, sNode := range cond.SearchNode {
			executeRawSearchOnNode(sNode, searchReq, segmentSearch, allBlockSearchHelpers, queryMetrics,
				qid, allSearchResults)
		}
	}
	if cond.SearchQueries != nil {
		for _, query := range cond.SearchQueries {
			RawSearchSingleQuery(query, searchReq, segmentSearch, allBlockSearchHelpers, op, queryMetrics,
				qid, allSearchResults)
		}
	}
}

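// extractSortVals returns the sort value for a record, plus a bool that is
// true when the sort column could not be read as a float. Sorting on the
// timestamp column short-circuits to the record's timestamp; any other column
// is read from the column file and converted to a float64.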
func extractSortVals(aggs *structs.QueryAggregators, multiColReader *segread.MultiColSegmentReader, blkNum uint16,
	recNum uint16, recTs uint64, qid uint64) (float64, bool) {

	var sortVal float64
	invalidAggsCol := false

	if aggs == nil || aggs.Sort == nil {
		return sortVal, invalidAggsCol
	}

	if aggs.Sort.ColName == config.GetTimeStampKey() {
		sortVal = float64(recTs)
		return sortVal, invalidAggsCol
	}

	colVal, err := multiColReader.ExtractValueFromColumnFile(aggs.Sort.ColName, blkNum, recNum, qid)
	if err != nil {
		invalidAggsCol = true
		return sortVal, invalidAggsCol
	}
	floatVal, err := colVal.GetFloatValue()
	if err != nil {
		invalidAggsCol = true
		return 0, invalidAggsCol
	}
	return floatVal, invalidAggsCol
}

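// loadMetadataForSearchRequest lazily populates the request's block summaries:
// if they are missing, it downloads the segment's .bsu blob and reads the
// summaries from the block summaries file, logging (but not propagating) any
// failure.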
func loadMetadataForSearchRequest(searchReq *structs.SegmentSearchRequest, qid uint64) {
	if searchReq.SearchMetadata.BlockSummaries == nil {
		sFile := fmt.Sprintf("%v.bsu", searchReq.SegmentKey)
		err := blob.DownloadSegmentBlob(sFile, false)
		if err != nil {
			log.Errorf("qid=%v, loadMetadataForSearchRequest: failed to download bsu file %s for segment %s. Error: %+v",
				qid, sFile, searchReq.SegmentKey, err)
			return
		}
		bSum, _, _, err := microreader.ReadBlockSummaries(searchReq.SearchMetadata.BlockSummariesFile, []byte{})
		if err != nil {
			log.Errorf("qid=%v, loadMetadataForSearchRequest: failed to read block summaries for segment %s. block summary file: %s. Error: %+v",
				qid, searchReq.SegmentKey, searchReq.SearchMetadata.BlockSummariesFile, err)
		} else {
			searchReq.SearchMetadata.BlockSummaries = bSum
		}
	}
}

// getRollupForAggregation returns the rolled-up blocks, a bool indicating
// whether the aggregations have a time histogram, and a bool indicating
// whether the aggregations have a non-time aggregation.
func getRollupForAggregation(aggs *structs.QueryAggregators, rupReader *segread.RollupReader) (map[uint16]map[uint64]*writer.RolledRecs, bool, bool) {
	var allBlocksToXRollup map[uint16]map[uint64]*writer.RolledRecs
	aggsHasTimeHt := false
	aggsHasNonTimeHt := false
	if aggs != nil {
		if aggs.TimeHistogram != nil {
			aggsHasTimeHt = true
			switch htInt := aggs.TimeHistogram.IntervalMillis; {
			case htInt < 3600_000:
				// A second- or millisecond-based time histogram; use the minute-based rollups.
				if rupReader != nil {
					val, err := rupReader.GetMinRollups()
					if err == nil {
						allBlocksToXRollup = val
					}
				}
			case htInt < 86400_000:
				if rupReader != nil {
					val, err := rupReader.GetHourRollups()
					if err == nil {
						allBlocksToXRollup = val
					}
				}
			default:
				if rupReader != nil {
					val, err := rupReader.GetDayRollups()
					if err == nil {
						allBlocksToXRollup = val
					}
				}
			}
		}
		if aggs.GroupByRequest != nil {
			aggsHasNonTimeHt = true
		}
	}
	return allBlocksToXRollup, aggsHasTimeHt, aggsHasNonTimeHt
}

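// AggsFastPathWrapper acquires the same concurrency and memory limits as
// RawSearchSegmentFileWrapper, then runs aggsFastPath, which applies
// aggregations directly to every block without a raw-search filter pass.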
func AggsFastPathWrapper(req *structs.SegmentSearchRequest, parallelismPerFile int64,
	searchNode *structs.SearchNode, timeRange *dtu.TimeRange, sizeLimit uint64, aggs *structs.QueryAggregators,
	allSearchResults *segresults.SearchResults, qid uint64, qs *summary.QuerySummary) {

	err := numConcurrentRawSearch.TryAcquireWithBackoff(1, 5, fmt.Sprintf("qid.%d", qid))
	if err != nil {
		log.Errorf("qid=%d, Failed to acquire resources for aggs fast path! Error: %+v", qid, err)
		allSearchResults.AddError(err)
		return
	}
	defer numConcurrentRawSearch.Release(1)
	searchMemory := req.GetMaxSearchMemorySize(searchNode, parallelismPerFile, PQMR_INITIAL_SIZE)
	err = limit.RequestSearchMemory(searchMemory)
	if err != nil {
		log.Errorf("qid=%d, Failed to acquire memory from global pool for search! Error: %v", qid, err)
		allSearchResults.AddError(err)
		return
	}
	loadMetadataForSearchRequest(req, qid)

	aggsFastPath(req, searchNode, timeRange, sizeLimit, aggs, parallelismPerFile, allSearchResults, qid, qs)
}

func aggsFastPath(searchReq *structs.SegmentSearchRequest, searchNode *structs.SearchNode, timeRange *dtu.TimeRange,
	sizeLimit uint64, aggs *structs.QueryAggregators, fileParallelism int64, allSearchResults *segresults.SearchResults, qid uint64,
	querySummary *summary.QuerySummary) {

	if fileParallelism <= 0 {
		log.Errorf("qid=%d, aggsFastPath: invalid fileParallelism of %d - must be > 0", qid, fileParallelism)
		allSearchResults.AddError(errors.New("invalid fileParallelism - must be > 0"))
		return
	} else if searchReq == nil {
		log.Errorf("qid=%d, aggsFastPath: received a nil search request", qid)
		allSearchResults.AddError(errors.New("nil search request"))
		return
	} else if searchReq.SearchMetadata == nil {
		log.Errorf("qid=%d, aggsFastPath: search metadata not provided for %s", qid, searchReq.SegmentKey)
		allSearchResults.AddError(errors.New("search metadata not provided"))
		return
	}

	blockSummaries := searchReq.SearchMetadata.BlockSummaries
	if blockSummaries == nil {
		log.Errorf("qid=%d, aggsFastPath: received empty block summaries for %s", qid, searchReq.SegmentKey)
		allSearchResults.AddError(errors.New("block summaries not provided"))
		return
	}

	sTime := time.Now()

	queryMetrics := &structs.QueryProcessingMetrics{}
	searchNode.AddQueryInfoForNode()

	segmentSearchRecords := InitBlocksForAggsFastPath(searchReq, blockSummaries)
	queryMetrics.SetNumBlocksToRawSearch(uint64(segmentSearchRecords.numBlocksToSearch))
	queryMetrics.SetNumBlocksInSegFile(uint64(segmentSearchRecords.numBlocksInSegFile))
	numBlockFilteredRecords := segmentSearchRecords.getTotalCountsFastPath()
	queryMetrics.SetNumRecordsToRawSearch(numBlockFilteredRecords)

	if len(segmentSearchRecords.AllBlockStatus) == 0 {
		log.Debugf("qid=%d, aggsFastPath: finished raw search for file %s in %+v", qid, searchReq.SegmentKey, time.Since(sTime))
		log.Debugf("qid=%d, numRecordsToRawSearch=%+v numBlocksToRawSearch=%+v timeRange=%+v",
			qid, queryMetrics.NumRecordsToRawSearch, queryMetrics.NumBlocksToRawSearch, timeRange)
		return
	}

	err := applyAggregationsToResultFastPath(aggs, segmentSearchRecords, searchReq, blockSummaries, timeRange,
		sizeLimit, fileParallelism, queryMetrics, qid, allSearchResults)
	if err != nil {
		log.Errorf("qid=%d, aggsFastPath: failed to apply aggregations to result for segKey %+v. Error: %v", qid, searchReq.SegmentKey, err)
		allSearchResults.AddError(err)
		return
	}

	finalMatched := segmentSearchRecords.getTotalCountsFastPath()
	segmentSearchRecords.Close()
	queryMetrics.SetNumRecordsMatched(finalMatched)

	if finalMatched > 0 {
		searchReq.HasMatchedRrc = true
	}

	timeElapsed := time.Since(sTime)
	querySummary.UpdateSummary(summary.RAW, timeElapsed, queryMetrics)
}

// RawComputeSegmentStats computes raw segment stats and returns a
// map[string]*structs.SegStats covering every measure operation in measureOps.
// It checks each record's timestamp, so it should be used for partially
// enclosed segments and for unrotated segments.
func RawComputeSegmentStats(req *structs.SegmentSearchRequest, fileParallelism int64,
	searchNode *structs.SearchNode, timeRange *dtu.TimeRange, measureOps []*structs.MeasureAggregator,
	allSearchResults *segresults.SearchResults, qid uint64, qs *summary.QuerySummary) (map[string]*structs.SegStats, error) {

	err := numConcurrentRawSearch.TryAcquireWithBackoff(1, 5, fmt.Sprintf("qid.%d", qid))
	if err != nil {
		log.Errorf("qid=%d, Failed to acquire resources for raw search! Error: %+v", qid, err)
		allSearchResults.AddError(err)
		return nil, errors.New("failed to acquire resources for segment stats")
	}
	defer numConcurrentRawSearch.Release(1)

	if fileParallelism <= 0 {
		log.Errorf("qid=%d, RawComputeSegmentStats: invalid fileParallelism of %d - must be > 0", qid, fileParallelism)
		return nil, errors.New("invalid fileParallelism - must be > 0")
	} else if req == nil {
		log.Errorf("qid=%d, RawComputeSegmentStats: received a nil search request", qid)
		return nil, errors.New("received a nil search request")
	} else if req.SearchMetadata == nil {
		log.Errorf("qid=%d, RawComputeSegmentStats: search metadata not provided for %s", qid, req.SegmentKey)
		return nil, errors.New("search metadata not provided")
	}

	blockSummaries := req.SearchMetadata.BlockSummaries
	if blockSummaries == nil {
		log.Errorf("qid=%d, RawComputeSegmentStats: received empty block summaries for %s", qid, req.SegmentKey)
		return nil, errors.New("block summaries not provided")
	}

	sTime := time.Now()

	queryMetrics := &structs.QueryProcessingMetrics{}
	searchNode.AddQueryInfoForNode()

	segmentSearchRecords := InitBlocksToSearch(req, blockSummaries, allSearchResults, timeRange)
	queryMetrics.SetNumBlocksToRawSearch(uint64(segmentSearchRecords.numBlocksToSearch))
	queryMetrics.SetNumBlocksInSegFile(uint64(segmentSearchRecords.numBlocksInSegFile))
	numBlockFilteredRecords, _ := segmentSearchRecords.getTotalCounts()
	queryMetrics.SetNumRecordsToRawSearch(numBlockFilteredRecords)

	retVal := make(map[string]*structs.SegStats)
	if len(segmentSearchRecords.AllBlockStatus) == 0 {
		return retVal, nil
	}

	allBlockSearchHelpers := structs.InitAllBlockSearchHelpers(fileParallelism)
	executeRawSearchOnNode(searchNode, req, segmentSearchRecords, allBlockSearchHelpers, queryMetrics,
		qid, allSearchResults)

	segStats, err := applySegStatsToMatchedRecords(measureOps, segmentSearchRecords, req, blockSummaries, timeRange,
		fileParallelism, queryMetrics, qid)
	if err != nil {
		log.Errorf("qid=%d, RawComputeSegmentStats: failed to compute segstats. Error: %+v", qid, err)
		return nil, err
	}

	finalMatched, finalUnmatched := segmentSearchRecords.getTotalCounts()
	segmentSearchRecords.Close()
	queryMetrics.SetNumRecordsMatched(finalMatched)
	queryMetrics.SetNumRecordsUnmatched(finalUnmatched)

	timeElapsed := time.Since(sTime)
	qs.UpdateSummary(summary.RAW, timeElapsed, queryMetrics)
	return segStats, nil
}