github.com/m3db/m3@v1.5.0/src/dbnode/integration/generate/writer.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package generate

import (
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	ns "github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/persist"
	"github.com/m3db/m3/src/dbnode/persist/fs"
	"github.com/m3db/m3/src/dbnode/sharding"
	"github.com/m3db/m3/src/x/checked"
	"github.com/m3db/m3/src/x/context"
	xtime "github.com/m3db/m3/src/x/time"
)

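// writer writes generated series data to disk as data and snapshot fileset
// files using the configured Options.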
type writer struct {
	opts Options
}

// WriteAllPredicate is a WriteDatapointPredicate that accepts every datapoint.
func WriteAllPredicate(_ TestValue) bool {
	return true
}

// NewWriter returns a new Writer that uses the given options.
func NewWriter(opts Options) Writer {
	return &writer{
		opts: opts,
	}
}

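// Illustrative usage (a sketch only: NewOptions and SetFilePathPrefix are
// assumed to be defined elsewhere in this package, and nsCtx, shardSet and
// seriesMaps are placeholders supplied by the calling test):
//
//	w := NewWriter(NewOptions().SetFilePathPrefix("/tmp/m3-test-data"))
//	if err := w.WriteData(nsCtx, shardSet, seriesMaps, 0); err != nil {
//		// handle error
//	}

// WriteData writes a data fileset for each block start in seriesMaps at the
// given volume, keeping every datapoint.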
func (w *writer) WriteData(
	nsCtx ns.Context,
	shardSet sharding.ShardSet,
	seriesMaps SeriesBlocksByStart,
	volume int,
) error {
	return w.WriteDataWithPredicate(nsCtx, shardSet, seriesMaps, volume, WriteAllPredicate)
}

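// WriteSnapshot writes a snapshot fileset for each block start in seriesMaps,
// keeping every datapoint; the snapshot time is the block start plus
// snapshotInterval.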
func (w *writer) WriteSnapshot(
	nsCtx ns.Context,
	shardSet sharding.ShardSet,
	seriesMaps SeriesBlocksByStart,
	volume int,
	snapshotInterval time.Duration,
) error {
	return w.WriteSnapshotWithPredicate(
		nsCtx, shardSet, seriesMaps, volume, WriteAllPredicate, snapshotInterval)
}

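// WriteDataWithPredicate writes data filesets, keeping only the datapoints
// that satisfy pred.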
func (w *writer) WriteDataWithPredicate(
	nsCtx ns.Context,
	shardSet sharding.ShardSet,
	seriesMaps SeriesBlocksByStart,
	volume int,
	pred WriteDatapointPredicate,
) error {
	return w.writeWithPredicate(
		nsCtx, shardSet, seriesMaps, volume, pred, persist.FileSetFlushType, 0)
}

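// WriteSnapshotWithPredicate writes snapshot filesets, keeping only the
// datapoints that satisfy pred.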
func (w *writer) WriteSnapshotWithPredicate(
	nsCtx ns.Context,
	shardSet sharding.ShardSet,
	seriesMaps SeriesBlocksByStart,
	volume int,
	pred WriteDatapointPredicate,
	snapshotInterval time.Duration,
) error {
	return w.writeWithPredicate(
		nsCtx, shardSet, seriesMaps, volume, pred, persist.FileSetSnapshotType, snapshotInterval)
}

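// writeWithPredicate writes a fileset of the given type for every block start
// within the retention period: starts present in seriesMaps are written with
// their series data, and the remaining starts are written as empty filesets
// when WriteEmptyShards is enabled.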
func (w *writer) writeWithPredicate(
	nsCtx ns.Context,
	shardSet sharding.ShardSet,
	seriesMaps SeriesBlocksByStart,
	volume int,
	pred WriteDatapointPredicate,
	fileSetType persist.FileSetType,
	snapshotInterval time.Duration,
) error {
	var (
		gOpts          = w.opts
		blockSize      = gOpts.BlockSize()
		currStart      = gOpts.ClockOptions().NowFn()().Truncate(blockSize)
		retentionStart = currStart.Add(-gOpts.RetentionPeriod())
		isValidStart   = func(start time.Time) bool {
			return start.Equal(retentionStart) || start.After(retentionStart)
		}
		starts = make(map[xtime.UnixNano]struct{})
	)

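	// Enumerate every block start from the current block back to the start of
	// the retention period.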
	for start := currStart; isValidStart(start); start = start.Add(-blockSize) {
		starts[xtime.ToUnixNano(start)] = struct{}{}
	}

	writer, err := fs.NewWriter(fs.NewOptions().
		SetFilePathPrefix(gOpts.FilePathPrefix()).
		SetWriterBufferSize(gOpts.WriterBufferSize()).
		SetNewFileMode(gOpts.NewFileMode()).
		SetNewDirectoryMode(gOpts.NewDirectoryMode()))
	if err != nil {
		return err
	}
	encoder := gOpts.EncoderPool().Get()
	encoder.SetSchema(nsCtx.Schema)
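	// Write the generated data block by block, removing each covered block
	// start so that only the empty ones remain.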
	for start, data := range seriesMaps {
		err := writeToDiskWithPredicate(
			writer, shardSet, encoder, start, nsCtx, blockSize,
			data, volume, pred, fileSetType, snapshotInterval)
		if err != nil {
			return err
		}
		delete(starts, start)
	}

	// Write remaining files even for empty start periods to avoid unfulfilled ranges
	if w.opts.WriteEmptyShards() {
		for start := range starts {
			err := writeToDiskWithPredicate(
				writer, shardSet, encoder, start, nsCtx, blockSize,
				nil, volume, pred, fileSetType, snapshotInterval)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

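// writeToDiskWithPredicate writes a single block start to disk. Series are
// grouped by shard (every shard in the shard set gets a fileset, even when it
// has no data) and each series is encoded with only the datapoints that
// satisfy pred.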
func writeToDiskWithPredicate(
	writer fs.DataFileSetWriter,
	shardSet sharding.ShardSet,
	encoder encoding.Encoder,
	start xtime.UnixNano,
	nsCtx ns.Context,
	blockSize time.Duration,
	seriesList SeriesBlock,
	volume int,
	pred WriteDatapointPredicate,
	fileSetType persist.FileSetType,
	snapshotInterval time.Duration,
) error {
	seriesPerShard := make(map[uint32][]Series)
	for _, shard := range shardSet.AllIDs() {
		// Ensure we write out block files for each shard even if there's no data
		seriesPerShard[shard] = make([]Series, 0)
	}
	for _, s := range seriesList {
		shard := shardSet.Lookup(s.ID)
		seriesPerShard[shard] = append(seriesPerShard[shard], s)
	}
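	// Reuse a two-element slice to hold the head and tail of each encoded
	// segment.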
	data := make([]checked.Bytes, 2)
	for shard, seriesList := range seriesPerShard {
		writerOpts := fs.DataWriterOpenOptions{
			BlockSize: blockSize,
			Identifier: fs.FileSetFileIdentifier{
				Namespace:   nsCtx.ID,
				Shard:       shard,
				BlockStart:  start,
				VolumeIndex: volume,
			},
			FileSetType: fileSetType,
			Snapshot: fs.DataWriterSnapshotOptions{
				SnapshotTime: start.Add(snapshotInterval),
			},
		}

		if err := writer.Open(writerOpts); err != nil {
			return err
		}

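		// Encode each series in turn and write the resulting segment together
		// with the series metadata.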
		ctx := context.NewBackground()
		for _, series := range seriesList {
			encoder.Reset(start, 0, nsCtx.Schema)
			for _, dp := range series.Data {
				if !pred(dp) {
					continue
				}

				if err := encoder.Encode(dp.Datapoint, xtime.Second, dp.Annotation); err != nil {
					return err
				}
			}

			ctx.Reset()
			stream, ok := encoder.Stream(ctx)
			if !ok {
				// None of the datapoints passed the predicate.
				continue
			}
			segment, err := stream.Segment()
			if err != nil {
				return err
			}
			data[0] = segment.Head
			data[1] = segment.Tail
			checksum := segment.CalculateChecksum()
			metadata := persist.NewMetadataFromIDAndTags(series.ID, series.Tags,
				persist.MetadataOptions{})
			err = writer.WriteAll(metadata, data, checksum)
			if err != nil {
				return err
			}
			ctx.BlockingClose()
		}

		if err := writer.Close(); err != nil {
			return err
		}
	}

	return nil
}