github.com/cockroachdb/pebble@v0.0.0-20231214172447-ab4952c5f87b/cmd/pebble/ycsb.go (about)

     1  // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
     2  // of this source code is governed by a BSD-style license that can be found in
     3  // the LICENSE file.
     4  
     5  package main
     6  
     7  import (
     8  	"fmt"
     9  	"log"
    10  	"strconv"
    11  	"strings"
    12  	"sync"
    13  	"sync/atomic"
    14  	"time"
    15  
    16  	"github.com/cockroachdb/errors"
    17  	"github.com/cockroachdb/pebble"
    18  	"github.com/cockroachdb/pebble/internal/ackseq"
    19  	"github.com/cockroachdb/pebble/internal/randvar"
    20  	"github.com/cockroachdb/pebble/internal/rate"
    21  	"github.com/spf13/cobra"
    22  	"golang.org/x/exp/rand"
    23  )
    24  
// Operation types for the YCSB workload mix. The values index into
// ycsbWeights and the per-worker latency histogram array, so ycsbNumOps
// must remain the final entry.
const (
	ycsbInsert = iota
	ycsbRead
	ycsbScan
	ycsbReverseScan
	ycsbUpdate
	ycsbNumOps
)
    33  
// ycsbConfig holds the parsed values of the ycsb command's flags. It is
// populated by initYCSB and read by runYcsb and the worker goroutines.
var ycsbConfig struct {
	batch            *randvar.Flag      // batch size distribution for inserts/updates
	keys             string             // key distribution name: "latest", "uniform", or "zipf"
	initialKeys      int                // keys inserted during init, before the workload starts
	prepopulatedKeys int                // keys already present in the database from a prior run
	numOps           uint64             // stop after this many operations (0 = unlimited)
	scans            *randvar.Flag      // scan length distribution
	values           *randvar.BytesFlag // value size distribution (plus optional compressibility)
	workload         string             // workload name (A-F) or op=weight spec
}
    44  
    45  var ycsbCmd = &cobra.Command{
    46  	Use:   "ycsb <dir>",
    47  	Short: "run customizable YCSB benchmark",
    48  	Long: `
    49  Run a customizable YCSB workload. The workload is specified by the --workload
    50  flag which can take either one of the standard workload mixes (A-F), or
    51  customizable workload fixes specified as a command separated list of op=weight
    52  pairs. For example, --workload=read=50,update=50 performs a workload composed
    53  of 50% reads and 50% updates. This is identical to the standard workload A.
    54  
    55  The --batch, --scans, and --values flags take the specification for a random
    56  variable: [<type>:]<min>[-<max>]. The <type> parameter must be one of "uniform"
    57  or "zipf". If <type> is omitted, a uniform distribution is used. If <max> is
    58  omitted it is set to the same value as <min>. The specification "1000" results
    59  in a constant 1000. The specification "10-100" results in a uniformly random
    60  variable in the range [10,100). The specification "zipf(10,100)" results in a
    61  zipf distribution with a minimum value of 10 and a maximum value of 100.
    62  
    63  The --batch flag controls the size of batches used for insert and update
    64  operations. The --scans flag controls the number of iterations performed by a
    65  scan operation. Read operations always read a single key.
    66  
    67  The --values flag provides for an optional "/<target-compression-ratio>"
    68  suffix. The default target compression ratio is 1.0 (i.e. incompressible random
    69  data). A value of 2 will cause random data to be generated that should compress
    70  to 50% of its uncompressed size.
    71  
    72  Standard workloads:
    73  
    74    A:  50% reads   /  50% updates
    75    B:  95% reads   /   5% updates
    76    C: 100% reads
    77    D:  95% reads   /   5% inserts
    78    E:  95% scans   /   5% inserts
    79    F: 100% inserts
    80  `,
    81  	Args: cobra.ExactArgs(1),
    82  	RunE: runYcsb,
    83  }
    84  
// init registers the ycsb command's flags at program startup.
func init() {
	initYCSB(ycsbCmd)
}
    88  
    89  func initYCSB(cmd *cobra.Command) {
    90  	ycsbConfig.batch = randvar.NewFlag("1")
    91  	cmd.Flags().Var(
    92  		ycsbConfig.batch, "batch",
    93  		"batch size distribution [{zipf,uniform}:]min[-max]")
    94  	cmd.Flags().StringVar(
    95  		&ycsbConfig.keys, "keys", "zipf", "latest, uniform, or zipf")
    96  	cmd.Flags().IntVar(
    97  		&ycsbConfig.initialKeys, "initial-keys", 10000,
    98  		"initial number of keys to insert before beginning workload")
    99  	cmd.Flags().IntVar(
   100  		&ycsbConfig.prepopulatedKeys, "prepopulated-keys", 0,
   101  		"number of keys that were previously inserted into the database")
   102  	cmd.Flags().Uint64VarP(
   103  		&ycsbConfig.numOps, "num-ops", "n", 0,
   104  		"maximum number of operations (0 means unlimited)")
   105  	ycsbConfig.scans = randvar.NewFlag("zipf:1-1000")
   106  	cmd.Flags().Var(
   107  		ycsbConfig.scans, "scans",
   108  		"scan length distribution [{zipf,uniform}:]min[-max]")
   109  	cmd.Flags().StringVar(
   110  		&ycsbConfig.workload, "workload", "B",
   111  		"workload type (A-F) or spec (read=X,update=Y,...)")
   112  	ycsbConfig.values = randvar.NewBytesFlag("1000")
   113  	cmd.Flags().Var(
   114  		ycsbConfig.values, "values",
   115  		"value size distribution [{zipf,uniform}:]min[-max][/<target-compression>]")
   116  }
   117  
   118  type ycsbWeights []float64
   119  
   120  func (w ycsbWeights) get(i int) float64 {
   121  	if i >= len(w) {
   122  		return 0
   123  	}
   124  	return w[i]
   125  }
   126  
// ycsbWorkloads defines the standard YCSB workload mixes, keyed by name.
// Each entry maps operation type (via the ycsb* constants used as slice
// indices) to its fraction of the mix.
var ycsbWorkloads = map[string]ycsbWeights{
	"A": {
		ycsbRead:   0.5,
		ycsbUpdate: 0.5,
	},
	"B": {
		ycsbRead:   0.95,
		ycsbUpdate: 0.05,
	},
	"C": {
		ycsbRead: 1.0,
	},
	"D": {
		ycsbInsert: 0.05,
		ycsbRead:   0.95,
		// TODO(peter): default to skewed-latest distribution.
	},
	"E": {
		ycsbInsert: 0.05,
		ycsbScan:   0.95,
	},
	"F": {
		ycsbInsert: 1.0,
		// TODO(peter): the real workload is read-modify-write.
	},
}
   153  
   154  func ycsbParseWorkload(w string) (ycsbWeights, error) {
   155  	if weights := ycsbWorkloads[w]; weights != nil {
   156  		return weights, nil
   157  	}
   158  	iWeights := make([]int, ycsbNumOps)
   159  	for _, p := range strings.Split(w, ",") {
   160  		parts := strings.Split(p, "=")
   161  		if len(parts) != 2 {
   162  			return nil, errors.Errorf("malformed weights: %s", errors.Safe(w))
   163  		}
   164  		weight, err := strconv.Atoi(parts[1])
   165  		if err != nil {
   166  			return nil, err
   167  		}
   168  		switch parts[0] {
   169  		case "insert":
   170  			iWeights[ycsbInsert] = weight
   171  		case "read":
   172  			iWeights[ycsbRead] = weight
   173  		case "scan":
   174  			iWeights[ycsbScan] = weight
   175  		case "rscan":
   176  			iWeights[ycsbReverseScan] = weight
   177  		case "update":
   178  			iWeights[ycsbUpdate] = weight
   179  		}
   180  	}
   181  
   182  	var sum int
   183  	for _, w := range iWeights {
   184  		sum += w
   185  	}
   186  	if sum == 0 {
   187  		return nil, errors.Errorf("zero weight specified: %s", errors.Safe(w))
   188  	}
   189  
   190  	weights := make(ycsbWeights, ycsbNumOps)
   191  	for i := range weights {
   192  		weights[i] = float64(iWeights[i]) / float64(sum)
   193  	}
   194  	return weights, nil
   195  }
   196  
   197  func ycsbParseKeyDist(d string) (randvar.Dynamic, error) {
   198  	totalKeys := uint64(ycsbConfig.initialKeys + ycsbConfig.prepopulatedKeys)
   199  	switch strings.ToLower(d) {
   200  	case "latest":
   201  		return randvar.NewDefaultSkewedLatest()
   202  	case "uniform":
   203  		return randvar.NewUniform(1, totalKeys), nil
   204  	case "zipf":
   205  		return randvar.NewZipf(1, totalKeys, 0.99)
   206  	default:
   207  		return nil, errors.Errorf("unknown distribution: %s", errors.Safe(d))
   208  	}
   209  }
   210  
   211  func runYcsb(cmd *cobra.Command, args []string) error {
   212  	if wipe && ycsbConfig.prepopulatedKeys > 0 {
   213  		return errors.New("--wipe and --prepopulated-keys both specified which is nonsensical")
   214  	}
   215  
   216  	weights, err := ycsbParseWorkload(ycsbConfig.workload)
   217  	if err != nil {
   218  		return err
   219  	}
   220  
   221  	keyDist, err := ycsbParseKeyDist(ycsbConfig.keys)
   222  	if err != nil {
   223  		return err
   224  	}
   225  
   226  	batchDist := ycsbConfig.batch
   227  	scanDist := ycsbConfig.scans
   228  	if err != nil {
   229  		return err
   230  	}
   231  
   232  	valueDist := ycsbConfig.values
   233  	y := newYcsb(weights, keyDist, batchDist, scanDist, valueDist)
   234  	runTest(args[0], test{
   235  		init: y.init,
   236  		tick: y.tick,
   237  		done: y.done,
   238  	})
   239  	return nil
   240  }
   241  
// ycsbBuf holds per-worker scratch state so that key and value generation
// does not allocate on every operation. Each worker goroutine owns one.
type ycsbBuf struct {
	rng      *rand.Rand // per-worker RNG, avoiding contention on a shared source
	keyBuf   []byte     // reusable buffer for makeKey
	valueBuf []byte     // reusable buffer for randBytes
	keyNums  []uint64   // reusable slice of key numbers for batched inserts
}
   248  
// ycsb implements the benchmark's init/tick/done hooks and holds all
// state shared by the worker goroutines.
type ycsb struct {
	db           DB
	writeOpts    *pebble.WriteOptions // pebble.Sync, or pebble.NoSync when the WAL is disabled
	weights      ycsbWeights          // normalized operation mix
	reg          *histogramRegistry   // latency histograms, one per active op
	keyDist      randvar.Dynamic      // key numbers for reads/scans/updates; max grows with inserts
	batchDist    randvar.Static       // batch sizes for inserts/updates
	scanDist     randvar.Static       // iteration counts for scans
	valueDist    *randvar.BytesFlag   // value sizes (and compressibility)
	readAmpCount atomic.Uint64        // number of read-amp samples taken
	readAmpSum   atomic.Uint64        // sum of sampled read-amp values
	keyNum       *ackseq.S            // allocates and acknowledges insert key numbers
	numOps       atomic.Uint64        // completed ops across all workers (vs --num-ops)
	limiter      *rate.Limiter        // global operation rate limit
	opsMap       map[string]int       // histogram name -> op type
}
   265  
   266  func newYcsb(
   267  	weights ycsbWeights,
   268  	keyDist randvar.Dynamic,
   269  	batchDist, scanDist randvar.Static,
   270  	valueDist *randvar.BytesFlag,
   271  ) *ycsb {
   272  	y := &ycsb{
   273  		reg:       newHistogramRegistry(),
   274  		weights:   weights,
   275  		keyDist:   keyDist,
   276  		batchDist: batchDist,
   277  		scanDist:  scanDist,
   278  		valueDist: valueDist,
   279  		opsMap:    make(map[string]int),
   280  	}
   281  	y.writeOpts = pebble.Sync
   282  	if disableWAL {
   283  		y.writeOpts = pebble.NoSync
   284  	}
   285  
   286  	ops := map[string]int{
   287  		"insert": ycsbInsert,
   288  		"read":   ycsbRead,
   289  		"rscan":  ycsbReverseScan,
   290  		"scan":   ycsbScan,
   291  		"update": ycsbUpdate,
   292  	}
   293  	for name, op := range ops {
   294  		w := y.weights.get(op)
   295  		if w == 0 {
   296  			continue
   297  		}
   298  		wstr := fmt.Sprint(int(100 * w))
   299  		fill := strings.Repeat("_", 3-len(wstr))
   300  		if fill == "" {
   301  			fill = "_"
   302  		}
   303  		fullName := fmt.Sprintf("%s%s%s", name, fill, wstr)
   304  		y.opsMap[fullName] = op
   305  	}
   306  	return y
   307  }
   308  
// init implements the test init hook: it optionally pre-loads
// --initial-keys keys in ~1MB batches, then starts the worker goroutines
// (plus, for workloads with no read ops, a background read-amp sampler).
func (y *ycsb) init(db DB, wg *sync.WaitGroup) {
	y.db = db

	if ycsbConfig.initialKeys > 0 {
		buf := &ycsbBuf{rng: randvar.NewRand()}

		b := db.NewBatch()
		size := 0
		start := time.Now()
		last := start
		for i := 1; i <= ycsbConfig.initialKeys; i++ {
			// Report pre-load progress roughly once per second.
			if now := time.Now(); now.Sub(last) >= time.Second {
				fmt.Printf("%5s inserted %d keys (%0.1f%%)\n",
					time.Duration(now.Sub(start).Seconds()+0.5)*time.Second,
					i-1, 100*float64(i-1)/float64(ycsbConfig.initialKeys))
				last = now
			}
			// Commit and start a fresh batch once ~1MB has accumulated.
			if size >= 1<<20 {
				if err := b.Commit(y.writeOpts); err != nil {
					log.Fatal(err)
				}
				b = db.NewBatch()
				size = 0
			}
			// Key numbers continue after any previously prepopulated keys.
			key := y.makeKey(uint64(i+ycsbConfig.prepopulatedKeys), buf)
			value := y.randBytes(buf)
			if err := b.Set(key, value, nil); err != nil {
				log.Fatal(err)
			}
			size += len(key) + len(value)
		}
		// Commit whatever remains in the final partial batch.
		if err := b.Commit(y.writeOpts); err != nil {
			log.Fatal(err)
		}
		_ = b.Close()
		fmt.Printf("inserted keys [%d-%d)\n",
			1+ycsbConfig.prepopulatedKeys,
			1+ycsbConfig.prepopulatedKeys+ycsbConfig.initialKeys)
	}
	// Seed the key-number sequence at the last key known to exist; inserts
	// allocate and acknowledge numbers above this base (see ycsb.insert).
	y.keyNum = ackseq.New(uint64(ycsbConfig.initialKeys + ycsbConfig.prepopulatedKeys))

	y.limiter = maxOpsPerSec.newRateLimiter()

	wg.Add(concurrency)

	// If this workload doesn't produce reads, sample the worst case read-amp
	// from Metrics() periodically.
	if y.weights.get(ycsbRead) == 0 && y.weights.get(ycsbScan) == 0 && y.weights.get(ycsbReverseScan) == 0 {
		wg.Add(1)
		go y.sampleReadAmp(db, wg)
	}

	for i := 0; i < concurrency; i++ {
		go y.run(db, wg)
	}
}
   365  
// run is the body of one worker goroutine: it repeatedly picks an
// operation according to the workload weights, executes it, and records
// its latency, stopping once --num-ops (if non-zero) total operations
// have completed across all workers.
func (y *ycsb) run(db DB, wg *sync.WaitGroup) {
	defer wg.Done()

	// Index the per-op histograms by op type for fast lookup in the loop.
	var latency [ycsbNumOps]*namedHistogram
	for name, op := range y.opsMap {
		latency[op] = y.reg.Register(name)
	}

	buf := &ycsbBuf{rng: randvar.NewRand()}

	ops := randvar.NewWeighted(nil, y.weights...)
	for {
		// Respect the global operation rate limit.
		wait(y.limiter)

		start := time.Now()

		op := ops.Int()
		switch op {
		case ycsbInsert:
			y.insert(db, buf)
		case ycsbRead:
			y.read(db, buf)
		case ycsbScan:
			y.scan(db, buf, false /* reverse */)
		case ycsbReverseScan:
			y.scan(db, buf, true /* reverse */)
		case ycsbUpdate:
			y.update(db, buf)
		default:
			panic("not reached")
		}

		latency[op].Record(time.Since(start))
		// numOps counts completed operations across all workers.
		if ycsbConfig.numOps > 0 && y.numOps.Add(1) >= ycsbConfig.numOps {
			break
		}
	}
}
   404  
// sampleReadAmp samples the database's read amplification from its
// metrics once per second. It is started only for workloads with no read
// operations, which would otherwise never record read-amp. When
// --num-ops is 0 (unlimited) the loop runs for the remainder of the test.
func (y *ycsb) sampleReadAmp(db DB, wg *sync.WaitGroup) {
	defer wg.Done()

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for range ticker.C {
		m := db.Metrics()
		y.readAmpCount.Add(1)
		y.readAmpSum.Add(uint64(m.ReadAmp()))
		if ycsbConfig.numOps > 0 && y.numOps.Load() >= ycsbConfig.numOps {
			break
		}
	}
}
   419  
   420  func (y *ycsb) hashKey(key uint64) uint64 {
   421  	// Inlined version of fnv.New64 + Write.
   422  	const offset64 = 14695981039346656037
   423  	const prime64 = 1099511628211
   424  
   425  	h := uint64(offset64)
   426  	for i := 0; i < 8; i++ {
   427  		h *= prime64
   428  		h ^= uint64(key & 0xff)
   429  		key >>= 8
   430  	}
   431  	return h
   432  }
   433  
// makeKey encodes keyNum as "user<hash>" followed by a fixed 10-byte
// MVCC timestamp suffix, reusing buf.keyBuf to avoid allocation. The
// returned slice aliases buf.keyBuf and is only valid until the next
// makeKey call on the same buf.
func (y *ycsb) makeKey(keyNum uint64, buf *ycsbBuf) []byte {
	// 4 bytes of "user" plus up to 20 decimal digits of hashed key number
	// (24 total), plus the 10-byte MVCC suffix appended below.
	const size = 24 + 10
	if cap(buf.keyBuf) < size {
		buf.keyBuf = make([]byte, size)
	}
	key := buf.keyBuf[:4]
	copy(key, "user")
	key = strconv.AppendUint(key, y.hashKey(keyNum), 10)
	// Use the MVCC encoding for keys. This appends a timestamp with
	// walltime=1. That knowledge is utilized by rocksDB.Scan.
	key = append(key, '\x00', '\x00', '\x00', '\x00', '\x00',
		'\x00', '\x00', '\x00', '\x01', '\x09')
	buf.keyBuf = key
	return key
}
   449  
// nextReadKey returns an encoded key drawn from the configured key
// distribution, for use by the read/scan/update operations.
func (y *ycsb) nextReadKey(buf *ycsbBuf) []byte {
	// NB: the range of values returned by keyDist is tied to the range returned
	// by keyNum.Base. See how these are both incremented by ycsb.insert().
	keyNum := y.keyDist.Uint64(buf.rng)
	return y.makeKey(keyNum, buf)
}
   456  
// randBytes returns a random value whose size is drawn from the --values
// distribution, reusing buf.valueBuf to avoid allocation.
func (y *ycsb) randBytes(buf *ycsbBuf) []byte {
	buf.valueBuf = y.valueDist.Bytes(buf.rng, buf.valueBuf)
	return buf.valueBuf
}
   461  
// insert writes a batch of new keys. Key numbers are allocated from
// y.keyNum before the write and acknowledged only after the commit
// succeeds; when an acknowledgement completes a contiguous prefix of key
// numbers, the read key distribution's max is widened so other workers
// can read the newly durable keys.
func (y *ycsb) insert(db DB, buf *ycsbBuf) {
	count := y.batchDist.Uint64(buf.rng)
	if cap(buf.keyNums) < int(count) {
		buf.keyNums = make([]uint64, count)
	}
	keyNums := buf.keyNums[:count]

	b := db.NewBatch()
	for i := range keyNums {
		keyNums[i] = y.keyNum.Next()
		_ = b.Set(y.makeKey(keyNums[i], buf), y.randBytes(buf), nil)
	}
	if err := b.Commit(y.writeOpts); err != nil {
		log.Fatal(err)
	}
	_ = b.Close()

	for i := range keyNums {
		delta, err := y.keyNum.Ack(keyNums[i])
		if err != nil {
			log.Fatal(err)
		}
		if delta > 0 {
			// delta keys are now contiguous and durable; make them visible
			// to the read key distribution.
			y.keyDist.IncMax(delta)
		}
	}
}
   489  
// read seeks an iterator to a randomly chosen key and touches the
// key/value found there, recording the iterator's read amplification if
// the iterator implementation exposes metrics.
func (y *ycsb) read(db DB, buf *ycsbBuf) {
	key := y.nextReadKey(buf)
	iter := db.NewIter(nil)
	iter.SeekGE(key)
	if iter.Valid() {
		// Force the key and value to be materialized.
		_ = iter.Key()
		_ = iter.Value()
	}

	type metrics interface {
		Metrics() pebble.IteratorMetrics
	}
	if m, ok := iter.(metrics); ok {
		y.readAmpCount.Add(1)
		y.readAmpSum.Add(uint64(m.Metrics().ReadAmp))
	}

	if err := iter.Close(); err != nil {
		log.Fatal(err)
	}
}
   511  
// scan iterates over a randomly chosen number of keys (drawn from the
// --scans distribution) starting at a random key, optionally in reverse,
// recording the iterator's read amplification if available.
func (y *ycsb) scan(db DB, buf *ycsbBuf, reverse bool) {
	count := y.scanDist.Uint64(buf.rng)
	key := y.nextReadKey(buf)
	iter := db.NewIter(nil)
	if err := db.Scan(iter, key, int64(count), reverse); err != nil {
		log.Fatal(err)
	}

	type metrics interface {
		Metrics() pebble.IteratorMetrics
	}
	if m, ok := iter.(metrics); ok {
		y.readAmpCount.Add(1)
		y.readAmpSum.Add(uint64(m.Metrics().ReadAmp))
	}

	if err := iter.Close(); err != nil {
		log.Fatal(err)
	}
}
   532  
   533  func (y *ycsb) update(db DB, buf *ycsbBuf) {
   534  	count := int(y.batchDist.Uint64(buf.rng))
   535  	b := db.NewBatch()
   536  	for i := 0; i < count; i++ {
   537  		_ = b.Set(y.nextReadKey(buf), y.randBytes(buf), nil)
   538  	}
   539  	if err := b.Commit(y.writeOpts); err != nil {
   540  		log.Fatal(err)
   541  	}
   542  	_ = b.Close()
   543  }
   544  
// tick implements the test tick hook: it prints one line of per-op
// instantaneous and cumulative throughput plus latency percentiles,
// re-printing the column header every 20 ticks.
func (y *ycsb) tick(elapsed time.Duration, i int) {
	if i%20 == 0 {
		fmt.Println("____optype__elapsed__ops/sec(inst)___ops/sec(cum)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)")
	}
	y.reg.Tick(func(tick histogramTick) {
		h := tick.Hist

		fmt.Printf("%10s %8s %14.1f %14.1f %8.1f %8.1f %8.1f %8.1f\n",
			tick.Name,
			// Display elapsed time rounded to the nearest second.
			time.Duration(elapsed.Seconds()+0.5)*time.Second,
			float64(h.TotalCount())/tick.Elapsed.Seconds(),
			float64(tick.Cumulative.TotalCount())/elapsed.Seconds(),
			time.Duration(h.ValueAtQuantile(50)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(95)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(99)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(100)).Seconds()*1000,
		)
	})
}
   564  
// done implements the test done hook: it prints final per-op statistics,
// then a single benchmark summary line that merges all ops and reports
// database read/write totals plus read and write amplification.
func (y *ycsb) done(elapsed time.Duration) {
	fmt.Println("\n____optype__elapsed_____ops(total)___ops/sec(cum)__avg(ms)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)")

	// Merge every per-op cumulative histogram into one for the summary.
	resultTick := histogramTick{}
	y.reg.Tick(func(tick histogramTick) {
		h := tick.Cumulative
		if resultTick.Cumulative == nil {
			resultTick.Now = tick.Now
			resultTick.Cumulative = h
		} else {
			resultTick.Cumulative.Merge(h)
		}

		fmt.Printf("%10s %7.1fs %14d %14.1f %8.1f %8.1f %8.1f %8.1f %8.1f\n",
			tick.Name, elapsed.Seconds(), h.TotalCount(),
			float64(h.TotalCount())/elapsed.Seconds(),
			time.Duration(h.Mean()).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(50)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(95)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(99)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(100)).Seconds()*1000)
	})
	fmt.Println()

	resultHist := resultTick.Cumulative
	m := y.db.Metrics()
	total := m.Total()

	// Avoid dividing by zero below when no read-amp samples were taken.
	readAmpCount := y.readAmpCount.Load()
	readAmpSum := y.readAmpSum.Load()
	if readAmpCount == 0 {
		readAmpSum = 0
		readAmpCount = 1
	}

	fmt.Printf("Benchmarkycsb/%s/values=%s %d  %0.1f ops/sec  %d read  %d write  %.2f r-amp  %0.2f w-amp\n\n",
		ycsbConfig.workload, ycsbConfig.values,
		resultHist.TotalCount(),
		float64(resultHist.TotalCount())/elapsed.Seconds(),
		total.BytesRead,
		total.BytesFlushed+total.BytesCompacted,
		float64(readAmpSum)/float64(readAmpCount),
		total.WriteAmp(),
	)
}