github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/bulk/buffering_adder.go

// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package bulk

import (
	"context"
	"sort"
	"time"

	"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/mon"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
	"github.com/cockroachdb/errors"
)

// BufferingAdder is a wrapper for an SSTBatcher that allows out-of-order calls
// to Add, buffering the added KVs and sorting them before passing them, in
// order, to the underlying SSTBatcher.
type BufferingAdder struct {
	sink SSTBatcher
	// timestamp applied to the MVCC keys created from added keys during SST construction.
	timestamp hlc.Timestamp

	// threshold at which buffered entries will be flushed to the SSTBatcher.
	curBufferSize int64

	// ceiling up to which curBufferSize can grow, if bulkMon permits.
	maxBufferSize func() int64

	// unit by which we increment curBufferSize.
	incrementBufferSize int64

	// currently buffered kvs.
	curBuf kvBuf

	flushCounts struct {
		total      int
		bufferSize int
		totalSort  time.Duration
		totalFlush time.Duration
	}

	// name of the BufferingAdder, used only for logging.
	name string

	bulkMon *mon.BytesMonitor
	memAcc  mon.BoundAccount

	onFlush func()
}

var _ kvserverbase.BulkAdder = &BufferingAdder{}

// MakeBulkAdder makes a kvserverbase.BulkAdder that buffers and sorts K/Vs
// passed to Add into SSTs that are then ingested. If set, rangeCache is
// consulted to avoid generating an SST that would span a range boundary and
// thus encounter an error and need to be split and retried to be applied.
func MakeBulkAdder(
	ctx context.Context,
	db SSTSender,
	rangeCache *kvcoord.RangeDescriptorCache,
	settings *cluster.Settings,
	timestamp hlc.Timestamp,
	opts kvserverbase.BulkAdderOptions,
	bulkMon *mon.BytesMonitor,
) (*BufferingAdder, error) {
	if opts.MinBufferSize == 0 {
		opts.MinBufferSize = 32 << 20
	}
	if opts.MaxBufferSize == nil {
		opts.MaxBufferSize = func() int64 { return 128 << 20 }
	}
	if opts.StepBufferSize == 0 {
		opts.StepBufferSize = 32 << 20
	}
	if opts.SSTSize == nil {
		opts.SSTSize = func() int64 { return 16 << 20 }
	}
	if opts.SplitAndScatterAfter == nil {
		// Splitting _before_ hitting the max size reduces the chance of
		// auto-splitting after the range is full, when it is more expensive to
		// split/move.
		opts.SplitAndScatterAfter = func() int64 { return 48 << 20 }
	}

	b := &BufferingAdder{
		name: opts.Name,
		sink: SSTBatcher{
			db:                db,
			maxSize:           opts.SSTSize,
			rc:                rangeCache,
			settings:          settings,
			skipDuplicates:    opts.SkipDuplicates,
			disallowShadowing: opts.DisallowShadowing,
			splitAfter:        opts.SplitAndScatterAfter,
		},
		timestamp:           timestamp,
		curBufferSize:       opts.MinBufferSize,
		maxBufferSize:       opts.MaxBufferSize,
		incrementBufferSize: opts.StepBufferSize,
		bulkMon:             bulkMon,
	}

	// If no monitor is attached to the bulk adder, we do not control its memory
	// usage.
	if bulkMon == nil {
		return b, nil
	}

	// At minimum, a bulk adder needs enough space to store a buffer of
	// curBufferSize and a subsequent SST of SSTSize in memory. If the memory
	// account is unable to reserve this minimum threshold, we cannot continue.
	//
	// TODO(adityamaru): IMPORT should also reserve memory for a single SST which
	// it will store in-memory before sending it to RocksDB.
	b.memAcc = bulkMon.MakeBoundAccount()
	if err := b.memAcc.Grow(ctx, b.curBufferSize); err != nil {
		return nil, errors.Wrap(err, "Not enough memory available to create a BulkAdder. Try setting a higher --max-sql-memory.")
	}

	return b, nil
}
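
// The following sketch is illustrative only and is not part of the original
// file: it assumes a caller that already has an SSTSender, cluster settings,
// and a write timestamp in hand, and shows the intended call pattern of the
// adder: construct it with MakeBulkAdder, Add keys in any order, then Flush
// and Close. The function name and its kvs parameter are hypothetical.
func exampleBufferedIngest(
	ctx context.Context,
	db SSTSender,
	settings *cluster.Settings,
	ts hlc.Timestamp,
	kvs map[string][]byte,
) error {
	adder, err := MakeBulkAdder(
		ctx, db, nil /* rangeCache */, settings, ts,
		kvserverbase.BulkAdderOptions{Name: "example"}, nil /* bulkMon */,
	)
	if err != nil {
		return err
	}
	defer adder.Close(ctx)
	// Keys may arrive in any order; the adder buffers and sorts them before
	// handing them to the underlying SSTBatcher.
	for k, v := range kvs {
		if err := adder.Add(ctx, roachpb.Key(k), v); err != nil {
			return err
		}
	}
	// Flush whatever is still sitting in the buffer.
	return adder.Flush(ctx)
}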

// SetOnFlush sets a callback to run after the buffering adder flushes.
func (b *BufferingAdder) SetOnFlush(fn func()) {
	b.onFlush = fn
}

// Close closes the underlying SSTBatcher and releases any memory monitor
// resources.
func (b *BufferingAdder) Close(ctx context.Context) {
	log.VEventf(ctx, 2,
		"bulk adder %s ingested %s, flushed %d due to buffer (%s) size. Flushed chunked as %d files (%d after split-retries), %d due to ranges, %d due to sst size.",
		b.name,
		sz(b.sink.totalRows.DataSize),
		b.flushCounts.bufferSize,
		sz(b.memAcc.Used()),
		b.sink.flushCounts.total, b.sink.flushCounts.files,
		b.sink.flushCounts.split, b.sink.flushCounts.sstSize,
	)
	b.sink.Close()

	if b.bulkMon != nil {
		b.memAcc.Close(ctx)
		b.bulkMon.Stop(ctx)
	}
}

// Add adds a key to the buffer and checks whether it needs to flush.
func (b *BufferingAdder) Add(ctx context.Context, key roachpb.Key, value []byte) error {
	if err := b.curBuf.append(key, value); err != nil {
		return err
	}

	if b.curBuf.MemSize > int(b.curBufferSize) {
		// This is an optimization that tries to increase the current buffer size
		// if our memory account permits it, which leads to the creation of fewer
		// SSTs.
		//
		// To prevent a single import from growing its buffer indefinitely, we
		// check whether it has exceeded its upper bound.
		if b.bulkMon != nil && b.curBufferSize < b.maxBufferSize() {
			if err := b.memAcc.Grow(ctx, b.incrementBufferSize); err != nil {
				// If we are unable to reserve the additional memory, flush the
				// buffer and continue as normal.
				b.flushCounts.bufferSize++
				log.VEventf(ctx, 3, "buffer size triggering flush of %s buffer", sz(b.curBuf.MemSize))
				return b.Flush(ctx)
			}
			b.curBufferSize += b.incrementBufferSize
		} else {
			b.flushCounts.bufferSize++
			log.VEventf(ctx, 3, "buffer size triggering flush of %s buffer", sz(b.curBuf.MemSize))
			return b.Flush(ctx)
		}
	}
	return nil
}
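
// nextBufferSize is an illustrative sketch, not part of the original file: it
// restates the growth policy Add applies above. The buffer grows by
// incrementBufferSize per step while the ceiling has not been reached and the
// memory reservation succeeds; otherwise the adder flushes instead. With the
// defaults applied in MakeBulkAdder (32 MiB min, 32 MiB step, 128 MiB max),
// the buffer steps through 32, 64, 96, and 128 MiB before every overflow
// forces a flush. The canReserve parameter is a hypothetical stand-in for the
// memory account's Grow call.
func nextBufferSize(cur, step, max int64, canReserve func(int64) bool) (next int64, grew bool) {
	if cur >= max || !canReserve(step) {
		// At the ceiling, or out of memory budget: keep the current size and
		// let the caller flush.
		return cur, false
	}
	return cur + step, true
}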

// CurrentBufferFill returns the current buffer fill percentage.
func (b *BufferingAdder) CurrentBufferFill() float32 {
	return float32(b.curBuf.MemSize) / float32(b.curBufferSize)
}

// IsEmpty returns true if the adder has no un-flushed data in its buffer.
func (b *BufferingAdder) IsEmpty() bool {
	return b.curBuf.Len() == 0
}

// Flush flushes any buffered kvs to the batcher.
func (b *BufferingAdder) Flush(ctx context.Context) error {
	if b.curBuf.Len() == 0 {
		if b.onFlush != nil {
			b.onFlush()
		}
		return nil
	}
	if err := b.sink.Reset(ctx); err != nil {
		return err
	}
	b.flushCounts.total++

	// Snapshot the sink's counters so the logging below can report per-flush deltas.
	before := b.sink.flushCounts
	beforeSize := b.sink.totalRows.DataSize

	beforeSort := timeutil.Now()

	// Sort the buffered KVs so they are passed to the sink in key order.
	sort.Sort(&b.curBuf)
	mvccKey := storage.MVCCKey{Timestamp: b.timestamp}

	beforeFlush := timeutil.Now()
	b.flushCounts.totalSort += beforeFlush.Sub(beforeSort)

	for i := range b.curBuf.entries {
		mvccKey.Key = b.curBuf.Key(i)
		if err := b.sink.AddMVCCKey(ctx, mvccKey, b.curBuf.Value(i)); err != nil {
			return err
		}
	}
	if err := b.sink.Flush(ctx); err != nil {
		return err
	}
	b.flushCounts.totalFlush += timeutil.Since(beforeFlush)

	if log.V(3) {
		written := b.sink.totalRows.DataSize - beforeSize
		files := b.sink.flushCounts.total - before.total
		dueToSplits := b.sink.flushCounts.split - before.split
		dueToSize := b.sink.flushCounts.sstSize - before.sstSize

		log.Infof(ctx,
			"flushing %s buffer wrote %d SSTs (avg: %s) with %d for splits, %d for size, took %v",
			sz(b.curBuf.MemSize), files, sz(written/int64(files)), dueToSplits, dueToSize, timeutil.Since(beforeSort),
		)
	}
	if log.V(4) {
		log.Infof(ctx,
			"bulk adder %s has ingested %s, spent %v sorting and %v flushing (%v sending, %v splitting). Flushed %d times due to buffer (%s) size. Flushed chunked as %d files (%d after split-retries), %d due to ranges, %d due to sst size.",
			b.name,
			sz(b.sink.totalRows.DataSize),
			b.flushCounts.totalSort,
			b.flushCounts.totalFlush,
			b.sink.flushCounts.sendWait,
			b.sink.flushCounts.splitWait,
			b.flushCounts.bufferSize,
			sz(b.memAcc.Used()),
			b.sink.flushCounts.total, b.sink.flushCounts.files,
			b.sink.flushCounts.split, b.sink.flushCounts.sstSize,
		)
	}
	if b.onFlush != nil {
		b.onFlush()
	}
	b.curBuf.Reset()
	return nil
}

// GetSummary returns this batcher's total added rows/bytes/etc.
func (b *BufferingAdder) GetSummary() roachpb.BulkOpSummary {
	return b.sink.GetSummary()
}