github.com/m3db/m3@v1.5.0/src/dbnode/ts/writes/write_batch.go (about)

     1  // Copyright (c) 2018 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package writes
    22  
    23  import (
    24  	"errors"
    25  
    26  	"github.com/m3db/m3/src/dbnode/ts"
    27  	"github.com/m3db/m3/src/x/ident"
    28  	xtime "github.com/m3db/m3/src/x/time"
    29  )
    30  
var (
	// errTagsAndEncodedTagsRequired is the sentinel error for a batch write
	// submitted without the tag information required to index it.
	errTagsAndEncodedTagsRequired = errors.New("tags iterator and encoded tags must be provided")
)
    34  
    35  const (
    36  	// preallocateBatchCoeff is used for allocating write batches of slightly bigger
    37  	// capacity than needed for the current request, in order to reduce allocations on
    38  	// subsequent reuse of pooled write batch (effective when writeBatch.adaptiveSize == true).
    39  	preallocateBatchCoeff = 1.2
    40  )
    41  
// writeBatch is the concrete, pooled implementation of WriteBatch. It
// accumulates per-series writes plus any pending index inserts, and is
// returned to its pool via finalizeFn when Finalize is called.
type writeBatch struct {
	// writes holds the accumulated batch writes, appended by Add/AddTagged.
	writes []BatchWrite
	// pendingIndex holds index inserts registered via SetPendingIndex.
	pendingIndex []PendingIndexInsert
	// ns is the namespace every write in this batch targets.
	ns ident.ID
	// Enables callers to pool encoded tags by allowing them to
	// provide a function to finalize all encoded tags once the
	// writeBatch itself gets finalized.
	finalizeEncodedTagsFn FinalizeEncodedTagsFn
	// Enables callers to pool annotations by allowing them to
	// provide a function to finalize all annotations once the
	// writeBatch itself gets finalized.
	finalizeAnnotationFn FinalizeAnnotationFn
	// finalizeFn is invoked at the end of Finalize, typically to return
	// this batch to its pool.
	finalizeFn func(WriteBatch)

	// adaptiveSize means that we create writeBatch with nil slices originally,
	// and then allocate/expand them based on the actual batch size (this provides
	// more resilience when dealing with small batch sizes).
	adaptiveSize bool
}
    61  
    62  // NewWriteBatch creates a new WriteBatch.
    63  func NewWriteBatch(
    64  	initialBatchSize int,
    65  	ns ident.ID,
    66  	finalizeFn func(WriteBatch),
    67  ) WriteBatch {
    68  	var (
    69  		adaptiveSize = initialBatchSize == 0
    70  		writes       []BatchWrite
    71  		pendingIndex []PendingIndexInsert
    72  	)
    73  
    74  	if !adaptiveSize {
    75  		writes = make([]BatchWrite, 0, initialBatchSize)
    76  		pendingIndex = make([]PendingIndexInsert, 0, initialBatchSize)
    77  		// Leaving nil slices if initialBatchSize == 0,
    78  		// they will be allocated when needed, based on the actual batch size.
    79  	}
    80  
    81  	return &writeBatch{
    82  		writes:       writes,
    83  		pendingIndex: pendingIndex,
    84  		ns:           ns,
    85  		finalizeFn:   finalizeFn,
    86  		adaptiveSize: adaptiveSize,
    87  	}
    88  }
    89  
    90  func (b *writeBatch) Add(
    91  	originalIndex int,
    92  	id ident.ID,
    93  	timestamp xtime.UnixNano,
    94  	value float64,
    95  	unit xtime.Unit,
    96  	annotation []byte,
    97  ) error {
    98  	write, err := newBatchWriterWrite(
    99  		originalIndex, b.ns, id, nil, timestamp, value, unit, annotation)
   100  	if err != nil {
   101  		return err
   102  	}
   103  	b.writes = append(b.writes, write)
   104  	return nil
   105  }
   106  
   107  func (b *writeBatch) AddTagged(
   108  	originalIndex int,
   109  	id ident.ID,
   110  	encodedTags ts.EncodedTags,
   111  	timestamp xtime.UnixNano,
   112  	value float64,
   113  	unit xtime.Unit,
   114  	annotation []byte,
   115  ) error {
   116  	write, err := newBatchWriterWrite(
   117  		originalIndex, b.ns, id, encodedTags, timestamp, value, unit, annotation)
   118  	if err != nil {
   119  		return err
   120  	}
   121  	b.writes = append(b.writes, write)
   122  	return nil
   123  }
   124  
   125  func (b *writeBatch) Reset(
   126  	batchSize int,
   127  	ns ident.ID,
   128  ) {
   129  	// Preallocate slightly more when not using initialBatchSize.
   130  	adaptiveBatchCap := int(float32(batchSize) * preallocateBatchCoeff)
   131  
   132  	if batchSize > cap(b.writes) {
   133  		batchCap := batchSize
   134  		if b.adaptiveSize {
   135  			batchCap = adaptiveBatchCap
   136  		}
   137  		b.writes = make([]BatchWrite, 0, batchCap)
   138  	} else {
   139  		b.writes = b.writes[:0]
   140  	}
   141  
   142  	if batchSize > cap(b.pendingIndex) {
   143  		batchCap := batchSize
   144  		if b.adaptiveSize {
   145  			batchCap = adaptiveBatchCap
   146  		}
   147  		b.pendingIndex = make([]PendingIndexInsert, 0, batchCap)
   148  	} else {
   149  		b.pendingIndex = b.pendingIndex[:0]
   150  	}
   151  
   152  	b.ns = ns
   153  	b.finalizeEncodedTagsFn = nil
   154  	b.finalizeAnnotationFn = nil
   155  }
   156  
// Iter returns the accumulated writes for iteration. The returned slice
// is the batch's backing storage, not a copy.
func (b *writeBatch) Iter() []BatchWrite {
	return b.writes
}
   160  
   161  func (b *writeBatch) SetSeries(idx int, series ts.Series) {
   162  	b.writes[idx].SkipWrite = false
   163  	b.writes[idx].Write.Series = series
   164  	// Make sure that the EncodedTags does not get clobbered
   165  	b.writes[idx].Write.Series.EncodedTags = b.writes[idx].EncodedTags
   166  }
   167  
   168  func (b *writeBatch) SetError(idx int, err error) {
   169  	b.writes[idx].SkipWrite = true
   170  	b.writes[idx].Err = err
   171  }
   172  
// SetSkipWrite marks the write at idx to be skipped during the actual
// write pass without recording an error.
func (b *writeBatch) SetSkipWrite(idx int) {
	b.writes[idx].SkipWrite = true
}
   176  
   177  func (b *writeBatch) SetPendingIndex(idx int, pending PendingIndexInsert) {
   178  	b.writes[idx].PendingIndex = true
   179  	b.pendingIndex = append(b.pendingIndex, pending)
   180  }
   181  
// PendingIndex returns the index inserts queued via SetPendingIndex. The
// returned slice is the batch's backing storage, not a copy.
func (b *writeBatch) PendingIndex() []PendingIndexInsert {
	return b.pendingIndex
}
   185  
// SetFinalizeEncodedTagsFn sets the function that will be called to finalize encodedTags
// when a WriteBatch is finalized, allowing the caller to pool them.
func (b *writeBatch) SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) {
	b.finalizeEncodedTagsFn = f
}
   191  
// SetFinalizeAnnotationFn sets the function that will be called to finalize annotations
// when a WriteBatch is finalized, allowing the caller to pool them.
func (b *writeBatch) SetFinalizeAnnotationFn(f FinalizeAnnotationFn) {
	b.finalizeAnnotationFn = f
}
   197  
   198  func (b *writeBatch) Finalize() {
   199  	if b.finalizeEncodedTagsFn != nil {
   200  		for _, write := range b.writes {
   201  			encodedTags := write.EncodedTags
   202  			if encodedTags == nil {
   203  				continue
   204  			}
   205  
   206  			b.finalizeEncodedTagsFn(encodedTags)
   207  		}
   208  	}
   209  	b.finalizeEncodedTagsFn = nil
   210  
   211  	if b.finalizeAnnotationFn != nil {
   212  		for _, write := range b.writes {
   213  			annotation := write.Write.Annotation
   214  			if annotation == nil {
   215  				continue
   216  			}
   217  
   218  			b.finalizeAnnotationFn(annotation)
   219  		}
   220  	}
   221  	b.finalizeAnnotationFn = nil
   222  
   223  	b.ns = nil
   224  
   225  	var zeroedWrite BatchWrite
   226  	for i := range b.writes {
   227  		// Remove any remaining pointers for G.C reasons.
   228  		b.writes[i] = zeroedWrite
   229  	}
   230  	b.writes = b.writes[:0]
   231  
   232  	var zeroedIndex PendingIndexInsert
   233  	for i := range b.pendingIndex {
   234  		// Remove any remaining pointers for G.C reasons.
   235  		b.pendingIndex[i] = zeroedIndex
   236  	}
   237  	b.pendingIndex = b.pendingIndex[:0]
   238  
   239  	b.finalizeFn(b)
   240  }
   241  
// cap reports the capacity of the writes slice (used when deciding
// whether a pooled batch is large enough for reuse).
func (b *writeBatch) cap() int {
	return cap(b.writes)
}
   245  
   246  func newBatchWriterWrite(
   247  	originalIndex int,
   248  	namespace ident.ID,
   249  	id ident.ID,
   250  	encodedTags ts.EncodedTags,
   251  	timestamp xtime.UnixNano,
   252  	value float64,
   253  	unit xtime.Unit,
   254  	annotation []byte,
   255  ) (BatchWrite, error) {
   256  	write := encodedTags == nil
   257  	writeTagged := encodedTags != nil
   258  	if !write && !writeTagged {
   259  		return BatchWrite{}, errTagsAndEncodedTagsRequired
   260  	}
   261  	return BatchWrite{
   262  		Write: Write{
   263  			Series: ts.Series{
   264  				ID:          id,
   265  				EncodedTags: encodedTags,
   266  				Namespace:   namespace,
   267  			},
   268  			Datapoint: ts.Datapoint{
   269  				TimestampNanos: timestamp,
   270  				Value:          value,
   271  			},
   272  			Unit:       unit,
   273  			Annotation: annotation,
   274  		},
   275  		EncodedTags:   encodedTags,
   276  		OriginalIndex: originalIndex,
   277  	}, nil
   278  }