github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/query/remote/compressed_codecs.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package remote

import (
	"errors"
	"fmt"
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/x/xio"
	"github.com/m3db/m3/src/dbnode/x/xpool"
	queryerrors "github.com/m3db/m3/src/query/errors"
	rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
	"github.com/m3db/m3/src/query/storage/m3/consolidators"
	"github.com/m3db/m3/src/x/checked"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/serialize"
	xtime "github.com/m3db/m3/src/x/time"
)

var (
	errDecodeNoIteratorPools = errors.New("no iterator pools for decoding")
)

func compressedSegmentFromBlockReader(br xio.BlockReader) (*rpc.M3Segment, error) {
	segment, err := br.Segment()
	if err != nil {
		return nil, err
	}

	return &rpc.M3Segment{
		Head:      segment.Head.Bytes(),
		Tail:      segment.Tail.Bytes(),
		StartTime: int64(br.Start),
		BlockSize: int64(br.BlockSize),
		Checksum:  segment.CalculateChecksum(),
	}, nil
}

func compressedSegmentsFromReaders(
	readers xio.ReaderSliceOfSlicesIterator,
) (*rpc.M3Segments, error) {
	segments := &rpc.M3Segments{}
	l, _, _ := readers.CurrentReaders()
	// NB(arnikola): if there is only a single reader, the segment has been merged;
	// otherwise, multiple unmerged segments exist.
	if l == 1 {
		br := readers.CurrentReaderAt(0)
		segment, err := compressedSegmentFromBlockReader(br)
		if err != nil {
			return nil, err
		}

		segments.Merged = segment
	} else {
		unmerged := make([]*rpc.M3Segment, 0, l)
		for i := 0; i < l; i++ {
			br := readers.CurrentReaderAt(i)
			segment, err := compressedSegmentFromBlockReader(br)
			if err != nil {
				return nil, err
			}
			unmerged = append(unmerged, segment)
		}

		segments.Unmerged = unmerged
	}

	return segments, nil
}
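// The helper below is an illustrative sketch only and is not referenced by the
// production code in this file; its name is hypothetical. It mirrors the
// merged/unmerged branch above by reporting how many compressed segments an
// rpc.M3Segments message carries.
func numCompressedSegments(segments *rpc.M3Segments) int {
	if segments.GetMerged() != nil {
		// A merged message always carries exactly one segment.
		return 1
	}
	return len(segments.GetUnmerged())
}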
func compressedTagsFromTagIterator(
	tagIter ident.TagIterator,
	encoderPool serialize.TagEncoderPool,
) ([]byte, error) {
	encoder := encoderPool.Get()
	err := encoder.Encode(tagIter)
	if err != nil {
		return nil, err
	}

	defer encoder.Finalize()
	data, encoded := encoder.Data()
	if !encoded {
		return nil, fmt.Errorf("no refs available to data")
	}

	db := data.Bytes()
	// Need to copy the encoded bytes to a buffer as the encoder keeps a reference to them.
	// TODO(arnikola): pool this when implementing https://github.com/m3db/m3/issues/1015
	return append(make([]byte, 0, len(db)), db...), nil
}

func buildTags(tagIter ident.TagIterator, iterPools encoding.IteratorPools) ([]byte, error) {
	if iterPools != nil {
		encoderPool := iterPools.TagEncoder()
		if encoderPool != nil {
			return compressedTagsFromTagIterator(tagIter, encoderPool)
		}
	}

	return nil, queryerrors.ErrCannotEncodeCompressedTags
}

// CompressedSeriesFromSeriesIterator builds a compressed rpc series from a
// SeriesIterator, the top-level iterator returned by m3db.
func CompressedSeriesFromSeriesIterator(
	it encoding.SeriesIterator,
	iterPools encoding.IteratorPools,
) (*rpc.Series, error) {
	// This SeriesIterator contains MultiReaderIterators, each representing a single
	// replica. Each MultiReaderIterator has a ReaderSliceOfSlicesIterator where each
	// step through the iterator exposes a slice of underlying BlockReaders. Each
	// BlockReader contains the run time encoded bytes that represent the series.
	//
	// SeriesIterator also has a TagIterator representing the tags associated with it.
	//
	// This function transforms a SeriesIterator into a protobuf representation to be
	// able to send it across the wire without needing to expand the series.
	//
	// The iterator's readers and tag iterator are rewound to their original
	// positions before returning, so the caller can still iterate the series
	// afterwards.
	replicas, err := it.Replicas()
	if err != nil {
		return nil, err
	}

	compressedReplicas := make([]*rpc.M3CompressedValuesReplica, 0, len(replicas))
	for _, replica := range replicas {
		replicaSegments := make([]*rpc.M3Segments, 0, len(replicas))
		readers := replica.Readers()
		idx := readers.Index()
		for next := true; next; next = readers.Next() {
			segments, err := compressedSegmentsFromReaders(readers)
			if err != nil {
				return nil, err
			}
			replicaSegments = append(replicaSegments, segments)
		}

		// Restore the original index of the reader so the caller can resume
		// the iterator at the expected state. This is safe because we do not
		// consume any of the internal block readers within the iterator.
		// It cannot be asserted that iters are passed in here at index 0, which is
		// why we make sure to rewind to the specific original index.
		readers.RewindToIndex(idx)

		r := &rpc.M3CompressedValuesReplica{
			Segments: replicaSegments,
		}
		compressedReplicas = append(compressedReplicas, r)
	}

	start := int64(it.Start())
	end := int64(it.End())

	itTags := it.Tags()
	defer itTags.Rewind()
	tags, err := buildTags(itTags, iterPools)
	if err != nil {
		return nil, err
	}

	return &rpc.Series{
		Meta: &rpc.SeriesMetadata{
			Id:        it.ID().Bytes(),
			StartTime: start,
			EndTime:   end,
		},
		Value: &rpc.Series_Compressed{
			Compressed: &rpc.M3CompressedSeries{
				CompressedTags: tags,
				Replicas:       compressedReplicas,
			},
		},
	}, nil
}
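// Minimal illustrative sketch, not part of the production path: it shows the
// intended round trip through the compressed protobuf form, assuming `it` and
// `pools` come from an m3db session. The function name is hypothetical.
func exampleCompressedSeriesRoundTrip(
	it encoding.SeriesIterator,
	pools encoding.IteratorPools,
) (encoding.SeriesIterator, error) {
	series, err := CompressedSeriesFromSeriesIterator(it, pools)
	if err != nil {
		return nil, err
	}

	compressed := series.GetCompressed()
	if compressed == nil {
		return nil, errors.New("series was not encoded in compressed form")
	}

	// Rebuild an equivalent iterator from the wire representation.
	return seriesIteratorFromCompressedSeries(compressed, series.GetMeta(), pools)
}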
// encodeToCompressedSeries encodes SeriesIterators to compressed series.
func encodeToCompressedSeries(
	results consolidators.SeriesFetchResult,
	iterPools encoding.IteratorPools,
) ([]*rpc.Series, error) {
	iters := results.SeriesIterators()
	seriesList := make([]*rpc.Series, 0, len(iters))
	for _, iter := range iters {
		series, err := CompressedSeriesFromSeriesIterator(iter, iterPools)
		if err != nil {
			return nil, err
		}

		seriesList = append(seriesList, series)
	}

	return seriesList, nil
}

func segmentBytesFromCompressedSegment(
	segHead, segTail []byte,
	checkedBytesWrapperPool xpool.CheckedBytesWrapperPool,
) (checked.Bytes, checked.Bytes) {
	return checkedBytesWrapperPool.Get(segHead), checkedBytesWrapperPool.Get(segTail)
}

func blockReaderFromCompressedSegment(
	seg *rpc.M3Segment,
	checkedBytesWrapperPool xpool.CheckedBytesWrapperPool,
) xio.BlockReader {
	head, tail := segmentBytesFromCompressedSegment(seg.GetHead(), seg.GetTail(), checkedBytesWrapperPool)
	segment := ts.NewSegment(head, tail, seg.GetChecksum(), ts.FinalizeNone)
	segmentReader := xio.NewSegmentReader(segment)

	return xio.BlockReader{
		SegmentReader: segmentReader,
		Start:         xtime.UnixNano(seg.GetStartTime()),
		BlockSize:     time.Duration(seg.GetBlockSize()),
	}
}

func tagIteratorFromCompressedTagsWithDecoder(
	compressedTags []byte,
	iterPools encoding.IteratorPools,
) (ident.TagIterator, error) {
	if iterPools == nil || iterPools.CheckedBytesWrapper() == nil || iterPools.TagDecoder() == nil {
		return nil, queryerrors.ErrCannotDecodeCompressedTags
	}

	checkedBytes := iterPools.CheckedBytesWrapper().Get(compressedTags)
	decoder := iterPools.TagDecoder().Get()
	decoder.Reset(checkedBytes)
	defer decoder.Close()
	// Copy underlying TagIterator bytes before closing the decoder and returning it to the pool.
	return decoder.Duplicate(), nil
}

func tagIteratorFromSeries(
	series *rpc.M3CompressedSeries,
	iteratorPools encoding.IteratorPools,
) (ident.TagIterator, error) {
	if series != nil && len(series.GetCompressedTags()) > 0 {
		return tagIteratorFromCompressedTagsWithDecoder(
			series.GetCompressedTags(),
			iteratorPools,
		)
	}

	return iteratorPools.TagDecoder().Get().Duplicate(), nil
}

func blockReadersFromCompressedSegments(
	segments []*rpc.M3Segments,
	checkedBytesWrapperPool xpool.CheckedBytesWrapperPool,
) [][]xio.BlockReader {
	blockReaders := make([][]xio.BlockReader, len(segments))

	for i, segment := range segments {
		blockReadersPerSegment := make([]xio.BlockReader, 0, len(segments))
		mergedSegment := segment.GetMerged()
		if mergedSegment != nil {
			reader := blockReaderFromCompressedSegment(mergedSegment, checkedBytesWrapperPool)
			blockReadersPerSegment = append(blockReadersPerSegment, reader)
		} else {
			unmerged := segment.GetUnmerged()
			for _, seg := range unmerged {
				reader := blockReaderFromCompressedSegment(seg, checkedBytesWrapperPool)
				blockReadersPerSegment = append(blockReadersPerSegment, reader)
			}
		}

		blockReaders[i] = blockReadersPerSegment
	}

	return blockReaders
}
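// Minimal illustrative sketch, not used elsewhere in this file: it round-trips
// a tag iterator through the compressed tag representation handled above,
// assuming `pools` is a fully initialised encoding.IteratorPools from an m3db
// session. The function name is hypothetical.
func exampleCompressedTagsRoundTrip(
	tags ident.TagIterator,
	pools encoding.IteratorPools,
) (ident.TagIterator, error) {
	// Encode the tags into the compact byte form carried on the wire.
	compressed, err := buildTags(tags, pools)
	if err != nil {
		return nil, err
	}

	// Decode the bytes back into an independent tag iterator.
	return tagIteratorFromCompressedTagsWithDecoder(compressed, pools)
}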
// seriesIteratorFromCompressedSeries creates a SeriesIterator from a compressed
// protobuf. This is the reverse of CompressedSeriesFromSeriesIterator, and takes
// an iteratorPools argument that allows reuse of the underlying iterator pools
// from the m3db session.
func seriesIteratorFromCompressedSeries(
	timeSeries *rpc.M3CompressedSeries,
	meta *rpc.SeriesMetadata,
	iteratorPools encoding.IteratorPools,
) (encoding.SeriesIterator, error) {
	// NB: Attempt to decompress compressed tags first as this is the only scenario
	// that is expected to fail.
	tagIter, err := tagIteratorFromSeries(timeSeries, iteratorPools)
	if err != nil {
		return nil, err
	}

	replicas := timeSeries.GetReplicas()

	multiReaderPool := iteratorPools.MultiReaderIterator()
	seriesIterPool := iteratorPools.SeriesIterator()
	checkedBytesWrapperPool := iteratorPools.CheckedBytesWrapper()
	idPool := iteratorPools.ID()

	allReplicaIterators := iteratorPools.MultiReaderIteratorArray().Get(len(replicas))

	for _, replica := range replicas {
		blockReaders := blockReadersFromCompressedSegments(replica.GetSegments(), checkedBytesWrapperPool)

		// TODO(arnikola): investigate pooling these?
		sliceOfSlicesIterator := xio.NewReaderSliceOfSlicesFromBlockReadersIterator(blockReaders)
		perReplicaIterator := multiReaderPool.Get()
		perReplicaIterator.ResetSliceOfSlices(sliceOfSlicesIterator, nil)

		allReplicaIterators = append(allReplicaIterators, perReplicaIterator)
	}

	id := idPool.BinaryID(checkedBytesWrapperPool.Get(meta.GetId()))
	start := xtime.UnixNano(meta.GetStartTime())
	end := xtime.UnixNano(meta.GetEndTime())

	seriesIter := seriesIterPool.Get()
	seriesIter.Reset(encoding.SeriesIteratorOptions{
		ID:             id,
		Tags:           tagIter,
		StartInclusive: start,
		EndExclusive:   end,
		Replicas:       allReplicaIterators,
	})

	return seriesIter, nil
}

// DecodeCompressedFetchResponse decodes a compressed fetch
// response to SeriesIterators.
func DecodeCompressedFetchResponse(
	fetchResult *rpc.FetchResponse,
	iteratorPools encoding.IteratorPools,
) (encoding.SeriesIterators, error) {
	if iteratorPools == nil {
		return nil, errDecodeNoIteratorPools
	}

	var (
		rpcSeries = fetchResult.GetSeries()
		numSeries = len(rpcSeries)
		iters     = encoding.NewSizedSeriesIterators(numSeries)
	)

	for i, series := range rpcSeries {
		compressed := series.GetCompressed()
		if compressed == nil {
			continue
		}

		iter, err := seriesIteratorFromCompressedSeries(
			compressed,
			series.GetMeta(),
			iteratorPools,
		)
		if err != nil {
			return nil, err
		}

		iters.SetAt(i, iter)
	}

	return iters, nil
}
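// Minimal illustrative sketch, assuming `response` and `pools` come from an
// m3db session: it decodes a fetch response and walks every decompressed
// datapoint. The function name is hypothetical and it is not used elsewhere.
func exampleDecodeAndIterate(
	response *rpc.FetchResponse,
	pools encoding.IteratorPools,
) error {
	iters, err := DecodeCompressedFetchResponse(response, pools)
	if err != nil {
		return err
	}
	defer iters.Close()

	for _, iter := range iters.Iters() {
		if iter == nil {
			// Slots for series without a compressed payload are never set.
			continue
		}
		for iter.Next() {
			dp, _, _ := iter.Current()
			_ = dp // consume the decompressed datapoint
		}
		if err := iter.Err(); err != nil {
			return err
		}
	}

	return nil
}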