github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/query/remote/server.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package remote

import (
	"context"
	"sync"
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/pools"
	"github.com/m3db/m3/src/query/storage/m3"
	"github.com/m3db/m3/src/query/storage/m3/consolidators"
	"github.com/m3db/m3/src/query/util/logging"
	"github.com/m3db/m3/src/x/instrument"

	"go.uber.org/zap"
	"google.golang.org/grpc"
)

const poolTimeout = time.Second * 10

// TODO: adjust default batch based on message size; huge series can
// unfortunately overwhelm this number.
const defaultBatch = 128

// TODO: add metrics
type grpcServer struct {
	createAt         time.Time
	poolErr          error
	batchSize        int
	querier          m3.Querier
	queryContextOpts models.QueryContextOptions
	poolWrapper      *pools.PoolWrapper
	once             sync.Once
	pools            encoding.IteratorPools
	instrumentOpts   instrument.Options
}

func min(a, b int) int {
	if a < b {
		return a
	}

	return b
}

// NewGRPCServer builds a grpc server which must be started later.
func NewGRPCServer(
	querier m3.Querier,
	queryContextOpts models.QueryContextOptions,
	poolWrapper *pools.PoolWrapper,
	instrumentOpts instrument.Options,
) *grpc.Server {
	server := grpc.NewServer()
	grpcServer := &grpcServer{
		createAt:         time.Now(),
		querier:          querier,
		queryContextOpts: queryContextOpts,
		poolWrapper:      poolWrapper,
		instrumentOpts:   instrumentOpts,
	}

	rpc.RegisterQueryServer(server, grpcServer)
	return server
}

func (s *grpcServer) Health(
	ctx context.Context,
	req *rpc.HealthRequest,
) (*rpc.HealthResponse, error) {
	uptime := time.Since(s.createAt)
	return &rpc.HealthResponse{
		UptimeDuration:    uptime.String(),
		UptimeNanoseconds: int64(uptime),
	}, nil
}

func (s *grpcServer) waitForPools() (encoding.IteratorPools, error) {
	s.once.Do(func() {
		s.pools, s.poolErr = s.poolWrapper.WaitForIteratorPools(poolTimeout)
	})

	return s.pools, s.poolErr
}
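// Usage sketch (not part of the original file, hedged): NewGRPCServer above
// returns a standard *grpc.Server, so the caller is expected to bind it to a
// listener itself. The listen address and the construction of the querier,
// context options, pool wrapper, and instrument options are assumptions of
// this illustration.
//
//	srv := NewGRPCServer(querier, queryContextOpts, poolWrapper, instrumentOpts)
//	lis, err := net.Listen("tcp", "0.0.0.0:7221")
//	if err != nil {
//		logger.Fatal("unable to listen", zap.Error(err))
//	}
//	if err := srv.Serve(lis); err != nil {
//		logger.Fatal("unable to serve gRPC", zap.Error(err))
//	}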
// Fetch reads decompressed series from M3 storage.
func (s *grpcServer) Fetch(
	message *rpc.FetchRequest,
	stream rpc.Query_FetchServer,
) error {
	ctx := retrieveMetadata(stream.Context(), s.instrumentOpts)
	logger := logging.WithContext(ctx, s.instrumentOpts)
	storeQuery, err := decodeFetchRequest(message)
	if err != nil {
		logger.Error("unable to decode fetch query", zap.Error(err))
		return err
	}

	fetchOpts, err := decodeFetchOptions(message.GetOptions())
	if err != nil {
		logger.Error("unable to decode options", zap.Error(err))
		return err
	}

	fetchOpts.Remote = true
	if fetchOpts.SeriesLimit == 0 {
		// Allow default to be set if not explicitly passed.
		fetchOpts.SeriesLimit = s.queryContextOpts.LimitMaxTimeseries
	}

	if fetchOpts.DocsLimit == 0 {
		// Allow default to be set if not explicitly passed.
		fetchOpts.DocsLimit = s.queryContextOpts.LimitMaxDocs
	}

	result, cleanup, err := s.querier.FetchCompressedResult(ctx, storeQuery, fetchOpts)
	defer cleanup()
	if err != nil {
		logger.Error("unable to fetch local query", zap.Error(err))
		return err
	}

	pools, err := s.waitForPools()
	if err != nil {
		logger.Error("unable to get pools", zap.Error(err))
		return err
	}

	results, err := encodeToCompressedSeries(result, pools)
	if err != nil {
		logger.Error("unable to compress query", zap.Error(err))
		return err
	}

	resultMeta := encodeResultMetadata(result.Metadata)
	size := min(defaultBatch, len(results))
	for ; len(results) > 0; results = results[size:] {
		size = min(size, len(results))
		response := &rpc.FetchResponse{
			Series: results[:size],
			Meta:   resultMeta,
		}

		err = stream.Send(response)
		if err != nil {
			logger.Error("unable to send fetch result", zap.Error(err))
			return err
		}
	}

	return nil
}

func (s *grpcServer) Search(
	message *rpc.SearchRequest,
	stream rpc.Query_SearchServer,
) error {
	var err error

	ctx := retrieveMetadata(stream.Context(), s.instrumentOpts)
	logger := logging.WithContext(ctx, s.instrumentOpts)
	searchQuery, err := decodeSearchRequest(message)
	if err != nil {
		logger.Error("unable to decode search query", zap.Error(err))
		return err
	}

	fetchOpts, err := decodeFetchOptions(message.GetOptions())
	if err != nil {
		logger.Error("unable to decode options", zap.Error(err))
		return err
	}

	searchResults, cleanup, err := s.querier.SearchCompressed(ctx, searchQuery,
		fetchOpts)
	defer cleanup()
	if err != nil {
		logger.Error("unable to search tags", zap.Error(err))
		return err
	}

	pools, err := s.waitForPools()
	if err != nil {
		logger.Error("unable to get pools", zap.Error(err))
		return err
	}

	results := searchResults.Tags
	size := min(defaultBatch, len(results))
	for ; len(results) > 0; results = results[size:] {
		size = min(size, len(results))
		response, err := encodeToCompressedSearchResult(results[:size],
			searchResults.Metadata, pools)
		if err != nil {
			logger.Error("unable to encode search result", zap.Error(err))
			return err
		}

		err = stream.Send(response)
		if err != nil {
			logger.Error("unable to send search result", zap.Error(err))
			return err
		}
	}

	return nil
}
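// Batching note (illustrative, not part of the original file): Fetch, Search,
// and CompleteTags all stream their results in slices of at most defaultBatch
// entries using the same loop shape, so e.g. 300 encoded series are sent as
// three FetchResponse messages of 128, 128, and 44 series. The send helper
// below is a hypothetical stand-in for stream.Send:
//
//	size := min(defaultBatch, len(results))
//	for ; len(results) > 0; results = results[size:] {
//		size = min(size, len(results))
//		if err := send(results[:size]); err != nil {
//			return err
//		}
//	}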
func (s *grpcServer) CompleteTags(
	message *rpc.CompleteTagsRequest,
	stream rpc.Query_CompleteTagsServer,
) error {
	var err error

	ctx := retrieveMetadata(stream.Context(), s.instrumentOpts)
	logger := logging.WithContext(ctx, s.instrumentOpts)
	completeTagsQuery, err := decodeCompleteTagsRequest(message)
	if err != nil {
		logger.Error("unable to decode complete tags query", zap.Error(err))
		return err
	}

	fetchOpts, err := decodeFetchOptions(message.GetOptions().GetOptions())
	if err != nil {
		logger.Error("unable to decode options", zap.Error(err))
		return err
	}

	completed, err := s.querier.CompleteTagsCompressed(ctx, completeTagsQuery, fetchOpts)
	if err != nil {
		logger.Error("unable to complete tags", zap.Error(err))
		return err
	}

	tags := completed.CompletedTags
	size := min(defaultBatch, len(tags))
	for ; len(tags) > 0; tags = tags[size:] {
		size = min(size, len(tags))
		results := &consolidators.CompleteTagsResult{
			CompleteNameOnly: completed.CompleteNameOnly,
			CompletedTags:    tags[:size],
			Metadata:         completed.Metadata,
		}

		response, err := encodeToCompressedCompleteTagsResult(results)
		if err != nil {
			logger.Error("unable to encode complete tags result", zap.Error(err))
			return err
		}

		err = stream.Send(response)
		if err != nil {
			logger.Error("unable to send complete tags result", zap.Error(err))
			return err
		}
	}

	return nil
}
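// Client-side sketch (hedged, not part of the original file): the generated
// rpc.QueryClient consumes the server-streamed batches produced by Fetch above
// until io.EOF. The dial target, dial options, and request fields are
// placeholders for illustration only.
//
//	conn, err := grpc.Dial("remote-coordinator:7221", grpc.WithInsecure())
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//
//	client := rpc.NewQueryClient(conn)
//	stream, err := client.Fetch(ctx, &rpc.FetchRequest{ /* query fields */ })
//	if err != nil {
//		return err
//	}
//	for {
//		resp, err := stream.Recv()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		_ = resp.GetSeries() // one batch of compressed series
//	}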