github.com/siglens/siglens@v0.0.0-20240328180423-f7ce9ae441ed/pkg/segment/search/metricssearch.go

/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package search

import (
	"fmt"
	"sync"
	"time"

	dtu "github.com/siglens/siglens/pkg/common/dtypeutils"
	"github.com/siglens/siglens/pkg/segment/memory/limit"
	"github.com/siglens/siglens/pkg/segment/query/summary"
	"github.com/siglens/siglens/pkg/segment/reader/metrics/series"
	"github.com/siglens/siglens/pkg/segment/results/mresults"
	tsidtracker "github.com/siglens/siglens/pkg/segment/results/mresults/tsid"
	"github.com/siglens/siglens/pkg/segment/structs"
	"github.com/siglens/siglens/pkg/segment/utils"
	"github.com/siglens/siglens/pkg/utils/semaphore"
	log "github.com/sirupsen/logrus"
)

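// metricSearch caps how many raw metrics segment searches may run at once.
// Each call to RawSearchMetricsSegment acquires one unit of this weight-5
// semaphore via TryAcquireWithBackoff and releases it when the search returns.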
var metricSearch *semaphore.WeightedSemaphore

func init() {
	metricSearch = semaphore.NewWeightedSemaphore(5, "metricsearch.limiter", time.Minute)
}

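// RawSearchMetricsSegment performs a raw search over a single metrics segment.
// It validates the request, throttles itself on the shared search semaphore,
// reserves search memory from the global pool, and then fans the segment's
// blocks out across req.Parallelism worker goroutines, each of which merges
// its matches into res.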
func RawSearchMetricsSegment(mQuery *structs.MetricsQuery, tsidInfo *tsidtracker.AllMatchedTSIDs, req *structs.MetricsSearchRequest, res *mresults.MetricsResult,
	timeRange *dtu.MetricsTimeRange, qid uint64, querySummary *summary.QuerySummary) {

	if req == nil {
		log.Errorf("qid=%d, RawSearchMetricsSegment: received a nil search request", qid)
		res.AddError(fmt.Errorf("received a nil search request"))
		return
	} else if req.Parallelism <= 0 {
		log.Errorf("qid=%d, RawSearchMetricsSegment: invalid fileParallelism of %d - must be > 0", qid, req.Parallelism)
		res.AddError(fmt.Errorf("invalid fileParallelism - must be > 0"))
		return
	}

	err := metricSearch.TryAcquireWithBackoff(1, 5, fmt.Sprintf("qid.%d", qid))
	if err != nil {
		log.Errorf("qid=%d, RawSearchMetricsSegment: failed to acquire resources for raw search. Error: %+v", qid, err)
		res.AddError(err)
		return
	}
	defer metricSearch.Release(1)
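	// Reserve search memory from the global pool before reading any data; the
	// estimate budgets 12 bytes per raw datapoint that can appear in a result,
	// plus a small fixed overhead.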
	searchMemory := uint64(utils.MAX_RAW_DATAPOINTS_IN_RESULT*12 + 80)
	err = limit.RequestSearchMemory(searchMemory)
	if err != nil {
		log.Errorf("qid=%d, RawSearchMetricsSegment: Failed to acquire memory from global pool for search! Error: %v", qid, err)
		res.AddError(err)
		return
	}

	sharedBlockIterators, err := series.InitSharedTimeSeriesSegmentReader(req.MetricsKeyBaseDir, int(req.Parallelism))
	if err != nil {
		log.Errorf("qid=%d, RawSearchMetricsSegment: Error initialising a time series reader. Error: %v", qid, err)
		res.AddError(err)
		return
	}
	defer sharedBlockIterators.Close()

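	// Queue every block of the segment on a buffered channel and let a fixed
	// pool of workers drain it, so blocks are searched in parallel without
	// pre-assigning them to specific workers.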
	blockNumChan := make(chan int, len(req.BlocksToSearch))
	for blkNum := range req.BlocksToSearch {
		blockNumChan <- int(blkNum)
	}
	close(blockNumChan)
	var wg sync.WaitGroup
	for i := 0; i < int(req.Parallelism); i++ {
		wg.Add(1)
		go blockWorker(i, sharedBlockIterators.TimeSeriesBlockReader[i], blockNumChan, tsidInfo, mQuery, timeRange, res, qid, &wg, querySummary)
	}
	wg.Wait()
}

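// blockWorker consumes block numbers from blockNumChan. For each block it
// initialises a block reader, iterates every matched TSID, keeps only the
// datapoints that fall inside timeRange, and accumulates them in a local
// result set that is merged into the shared result once all blocks are done.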
func blockWorker(workerID int, sharedReader *series.TimeSeriesSegmentReader, blockNumChan <-chan int, tsidInfo *tsidtracker.AllMatchedTSIDs,
	mQuery *structs.MetricsQuery, timeRange *dtu.MetricsTimeRange, res *mresults.MetricsResult, qid uint64, wg *sync.WaitGroup, querySummary *summary.QuerySummary) {
	defer wg.Done()
	queryMetrics := &structs.MetricsQueryProcessingMetrics{
		UpdateLock: &sync.Mutex{},
	}
	localRes := mresults.InitMetricResults(mQuery, qid)
	for blockNum := range blockNumChan {
		tsbr, err := sharedReader.InitReaderForBlock(uint16(blockNum), queryMetrics)
		if err != nil {
			log.Errorf("qid=%d, RawSearchMetricsSegment.blockWorker: Error initialising a block reader. Error: %v", qid, err)
			res.AddError(err)
			continue
		}

		querySummary.UpdateTimeLoadingTSOFiles(queryMetrics.TimeLoadingTSOFiles)
		querySummary.UpdateTimeLoadingTSGFiles(queryMetrics.TimeLoadingTSGFiles)
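		// Walk every matched TSID in this block and collect the datapoints
		// that fall within the query's time range.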
		for tsid, tsGroupId := range tsidInfo.GetAllTSIDs() {
			tsitr, found, err := tsbr.GetTimeSeriesIterator(tsid)
			queryMetrics.IncrementNumSeriesSearched(1)
			if err != nil {
				log.Errorf("qid=%d, RawSearchMetricsSegment.blockWorker: Error getting the time series iterator. Error: %v", qid, err)
				res.AddError(err)
				continue
			}
			if !found {
				continue
			}
			seriesHolder := mresults.InitSeriesHolder(mQuery, tsGroupId)
			for tsitr.Next() {
				ts, dp := tsitr.At()
				if !timeRange.CheckInRange(ts) {
					continue
				}
				seriesHolder.AddEntry(ts, dp)
			}
			err = tsitr.Err()
			if err != nil {
				log.Errorf("qid=%d, RawSearchMetricsSegment.blockWorker: iterator failed for worker %d. Error: %v", qid, workerID, err)
				res.AddError(err)
			}
			if seriesHolder.GetIdx() > 0 {
				localRes.AddSeries(seriesHolder, tsid, tsGroupId)
			}
		}
	}
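	// Fold this worker's local results into the shared result, then record
	// this worker's processing metrics in the query summary.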
	err := res.Merge(localRes)
	if err != nil {
		res.AddError(err)
		log.Errorf("qid=%d, RawSearchMetricsSegment.blockWorker: failed to merge local results into global results. Error: %v", qid, err)
	}
	queryMetrics.IncrementNumMetricsSegmentsSearched(1)
	querySummary.UpdateMetricsSummary(queryMetrics)
}