// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package query

import (
	"github.com/uber/aresdb/memutils"
	queryCom "github.com/uber/aresdb/query/common"
	"unsafe"
)

// NonAggrBatchExecutorImpl is batch executor implementation for non-aggregation query.
// It embeds BatchExecutorImpl and overrides the projection/reduction phases:
// only the selected dimension columns are evaluated, and reduction is a no-op.
type NonAggrBatchExecutorImpl struct {
	*BatchExecutorImpl
}

// project for non-aggregation query will only calculate the selected columns;
// measure calculation and reduce are skipped. Once the accumulated result
// reaches the query limit, OOPK.done is set so remaining batches are cancelled.
func (e *NonAggrBatchExecutorImpl) project() {
	// Prepare for dimension and measure evaluation: lazily allocate the output
	// dimension vectors and clamp this batch's size to the remaining capacity.
	e.prepareForDimEval(e.qc.OOPK.DimRowBytes, e.qc.OOPK.NumDimsPerDimWidth, e.stream)

	e.qc.reportTimingForCurrentBatch(e.stream, &e.start, prepareForDimAndMeasureTiming)
	// For non-aggregation query, we always write from start (offset 0) for
	// dimension output; there is no hash/aggregation bucket to index into.
	e.evalDimensions(0)
	// Uncompress the result from baseCount so each underlying row is emitted
	// individually into the output dimension vector.
	e.expandDimensions(e.qc.OOPK.NumDimsPerDimWidth)
	// Wait for stream to finish so no in-flight kernel still references the
	// buffers released by cleanupBeforeAggregation below.
	memutils.WaitForCudaStream(e.stream, e.qc.Device)
	e.qc.OOPK.currentBatch.cleanupBeforeAggregation()

	// Enough rows produced for the query limit: mark the whole query done so
	// other ongoing batch processing is cancelled.
	if e.qc.OOPK.currentBatch.resultSize >= e.qc.Query.Limit {
		e.qc.OOPK.done = true
	}
}

// reduce is intentionally a no-op: non-aggregation queries have no measures
// to fold, so projected rows are already the final per-batch result.
func (e *NonAggrBatchExecutorImpl) reduce() {
	// nothing need to do for non-aggregation query
}

// expandDimensions appends this batch's dimension rows to the accumulated
// device-side result. When the batch carries a base count vector the rows are
// compressed and must be expanded (uncompressed) on device; otherwise a plain
// device-to-device copy at offset resultSize suffices.
//
// numDims describes how many dimensions exist per value width bucket.
func (e *NonAggrBatchExecutorImpl) expandDimensions(numDims queryCom.DimCountsPerDimWidth) {
	bc := &e.qc.OOPK.currentBatch
	if bc.size == 0 {
		// nothing to do
		return
	}

	if bc.baseCountD.isNull() {
		// baseCountD is null, no uncompression is needed; asynchronously copy
		// the batch's rows between the two dimension vectors at row offset
		// resultSize. NOTE(review): exact source/destination roles of
		// dimensionVectorD[0]/[1] depend on asyncCopyDimensionVector's
		// parameter contract and the swap in prepareForDimEval — confirm there.
		asyncCopyDimensionVector(bc.dimensionVectorD[1].getPointer(), bc.dimensionVectorD[0].getPointer(), bc.size, bc.resultSize,
			numDims, bc.resultCapacity, bc.resultCapacity, memutils.AsyncCopyDeviceToDevice, e.stream, e.qc.Device)
		bc.resultSize += bc.size
		return
	}

	// Compressed path: expand on device. expand() is expected to advance
	// resultSize itself (it is not updated here).
	e.qc.doProfile(func() {
		e.qc.OOPK.currentBatch.expand(numDims, bc.size, e.stream, e.qc.Device)
		e.qc.reportTimingForCurrentBatch(e.stream, &e.start, expandEvalTiming)
	}, "expand", e.stream)
}

// prepareForDimEval readies the current batch for dimension evaluation:
//   - on first use, allocates two device dimension vectors sized to the query
//     limit (a non-aggregation query never needs more than Limit rows);
//   - swaps the two vectors so output accumulated so far stays consistent
//     across batches;
//   - clamps the batch size so no more rows are produced than the remaining
//     output capacity.
//
// dimRowBytes is the byte width of one output dimension row; stream is the
// CUDA stream the evaluation runs on.
func (e *NonAggrBatchExecutorImpl) prepareForDimEval(
	dimRowBytes int, numDimsPerDimWidth queryCom.DimCountsPerDimWidth, stream unsafe.Pointer) {

	bc := &e.qc.OOPK.currentBatch
	if bc.resultCapacity == 0 {
		// Lazy allocation: capacity is capped at Query.Limit because results
		// beyond the limit are discarded anyway.
		bc.resultCapacity = e.qc.Query.Limit
		bc.dimensionVectorD = [2]devicePointer{
			deviceAllocate(bc.resultCapacity*dimRowBytes, bc.device),
			deviceAllocate(bc.resultCapacity*dimRowBytes, bc.device),
		}
	}
	// to keep the consistency of the output dimension vector
	bc.dimensionVectorD[0], bc.dimensionVectorD[1] = bc.dimensionVectorD[1], bc.dimensionVectorD[0]
	// maximum rows needed from filter result
	lenWanted := bc.resultCapacity - bc.resultSize
	if bc.size > lenWanted {
		bc.size = lenWanted
	}
}