github.com/m3db/m3@v1.5.0/src/dbnode/storage/block/merged_block_reader.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package block

import (
	"fmt"
	"sync"
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/x/xio"
	"github.com/m3db/m3/src/x/context"
	"github.com/m3db/m3/src/x/pool"
	xtime "github.com/m3db/m3/src/x/time"
)

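// dbMergedBlockReader lazily merges two streams for the same block into a
// single stream. The merge is deferred until the first read and its result is
// memoized, so subsequent reads reuse the already merged stream.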
type dbMergedBlockReader struct {
	sync.RWMutex
	ctx        context.Context
	opts       Options
	blockStart xtime.UnixNano
	blockSize  time.Duration
	streams    [2]mergeableStream
	readers    [2]xio.SegmentReader
	merged     xio.BlockReader
	encoder    encoding.Encoder
	err        error
	nsCtx      namespace.Context
}

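// mergeableStream pairs a segment reader with a flag indicating whether the
// stream should be finalized once the merged reader releases it.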
type mergeableStream struct {
	stream   xio.SegmentReader
	finalize bool
}

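// clone copies the underlying stream using the provided bytes pool and
// preserves the finalize flag.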
func (ms mergeableStream) clone(pool pool.CheckedBytesPool) (mergeableStream, error) {
	stream, err := ms.stream.Clone(pool)
	if err != nil {
		return mergeableStream{}, err
	}
	return mergeableStream{
		stream:   stream,
		finalize: ms.finalize,
	}, nil
}

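// newDatabaseMergedBlockReader returns a block reader backed by the two given
// streams. No merging happens here; the returned reader merges the streams on
// first access via mergedReader.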
func newDatabaseMergedBlockReader(
	nsCtx namespace.Context,
	blockStart xtime.UnixNano,
	blockSize time.Duration,
	streamA, streamB mergeableStream,
	opts Options,
) xio.BlockReader {
	r := &dbMergedBlockReader{
		ctx:        opts.ContextPool().Get(),
		nsCtx:      nsCtx,
		opts:       opts,
		blockStart: blockStart,
		blockSize:  blockSize,
	}
	r.streams[0] = streamA
	r.streams[1] = streamB
	r.readers[0] = streamA.stream
	r.readers[1] = streamB.stream
	return xio.BlockReader{
		SegmentReader: r,
		Start:         blockStart,
		BlockSize:     blockSize,
	}
}

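// mergedReader returns the merged stream, performing the merge exactly once.
// It uses double-checked locking: a fast path under the read lock returns a
// previously merged result (or error); otherwise the write lock is taken, the
// two source streams are re-encoded into a single segment, and the source
// streams are released.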
func (r *dbMergedBlockReader) mergedReader() (xio.BlockReader, error) {
	r.RLock()
	if r.merged.IsNotEmpty() || r.err != nil {
		r.RUnlock()
		return r.merged, r.err
	}
	r.RUnlock()

	r.Lock()
	defer r.Unlock()

	if r.merged.IsNotEmpty() || r.err != nil {
		return r.merged, r.err
	}

	multiIter := r.opts.MultiReaderIteratorPool().Get()
	multiIter.Reset(r.readers[:], r.blockStart, r.blockSize, r.nsCtx.Schema)
	defer multiIter.Close()

	r.encoder = r.opts.EncoderPool().Get()
	r.encoder.Reset(r.blockStart, r.opts.DatabaseBlockAllocSize(), r.nsCtx.Schema)

	for multiIter.Next() {
		dp, unit, annotation := multiIter.Current()
		err := r.encoder.Encode(dp, unit, annotation)
		if err != nil {
			r.encoder.Close()
			r.err = err
			return xio.EmptyBlockReader, err
		}
	}
	if err := multiIter.Err(); err != nil {
		r.encoder.Close()
		r.err = err
		return xio.EmptyBlockReader, err
	}

	// Release references to the existing streams
	for i := range r.streams {
		if r.streams[i].stream != nil && r.streams[i].finalize {
			r.streams[i].stream.Finalize()
		}
		r.streams[i].stream = nil
	}
	for i := range r.readers {
		r.readers[i] = nil
	}

	// Can ignore OK here because BlockReader will handle nil streams
	// properly.
	stream, _ := r.encoder.Stream(r.ctx)
	r.merged = xio.BlockReader{
		SegmentReader: stream,
		Start:         r.blockStart,
		BlockSize:     r.blockSize,
	}

	return r.merged, nil
}

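// Clone clones both source streams and returns a new, not yet merged reader
// over the clones.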
func (r *dbMergedBlockReader) Clone(
	pool pool.CheckedBytesPool,
) (xio.SegmentReader, error) {
	s0, err := r.streams[0].clone(pool)
	if err != nil {
		return nil, err
	}
	s1, err := r.streams[1].clone(pool)
	if err != nil {
		return nil, err
	}
	return newDatabaseMergedBlockReader(
		r.nsCtx,
		r.blockStart,
		r.blockSize,
		s0,
		s1,
		r.opts,
	), nil
}

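// BlockSize returns the block size this reader was created with.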
func (r *dbMergedBlockReader) BlockSize() time.Duration {
	return r.blockSize
}

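// Read64 delegates to the merged stream, triggering the merge on first use.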
func (r *dbMergedBlockReader) Read64() (word uint64, n byte, err error) {
	reader, err := r.mergedReader()
	if err != nil {
		return 0, 0, err
	}
	return reader.Read64()
}

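// Peek64 delegates to the merged stream without consuming it, triggering the
// merge on first use.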
func (r *dbMergedBlockReader) Peek64() (word uint64, n byte, err error) {
	reader, err := r.mergedReader()
	if err != nil {
		return 0, 0, err
	}
	return reader.Peek64()
}

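// Segment returns the merged segment, triggering the merge on first use.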
func (r *dbMergedBlockReader) Segment() (ts.Segment, error) {
	reader, err := r.mergedReader()
	if err != nil {
		return ts.Segment{}, err
	}
	return reader.Segment()
}

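// SegmentReader returns the segment reader for the merged stream, triggering
// the merge on first use.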
func (r *dbMergedBlockReader) SegmentReader() (xio.SegmentReader, error) {
	reader, err := r.mergedReader()
	if err != nil {
		return nil, err
	}
	return reader.SegmentReader, nil
}

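// Reset panics: a merged block reader is single use and cannot be reset for
// re-use.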
func (r *dbMergedBlockReader) Reset(_ ts.Segment) {
	panic(fmt.Errorf("merged block reader not available for re-use"))
}

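// ResetWindowed panics for the same reason as Reset.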
func (r *dbMergedBlockReader) ResetWindowed(_ ts.Segment, _, _ time.Time) {
	panic(fmt.Errorf("merged block reader not available for re-use"))
}

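// Finalize releases the source streams (honoring their finalize flags), the
// merged stream and the encoder, and closes the reader's context.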
func (r *dbMergedBlockReader) Finalize() {
	r.Lock()

	// It is safe to block on close here: the finalizer completes immediately
	// since it only decrements refs on the buffer created in the encoder.
	r.ctx.BlockingClose()

	r.blockStart = 0
	for i := range r.streams {
		if r.streams[i].stream != nil && r.streams[i].finalize {
			r.streams[i].stream.Finalize()
		}
		r.streams[i].stream = nil
	}
	for i := range r.readers {
		if r.readers[i] != nil {
			r.readers[i] = nil
		}
	}

	if r.merged.IsNotEmpty() {
		r.merged.Finalize()
	}
	r.merged = xio.EmptyBlockReader

	if r.encoder != nil {
		r.encoder.Close()
	}
	r.encoder = nil

	r.err = nil

	r.Unlock()
}