github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/cli/systembench/cpu_bench.go

// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package systembench

import (
	"context"
	"fmt"
	"math"
	"sync/atomic"
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
	"github.com/cockroachdb/errors"
	"github.com/dustin/go-humanize"
	"golang.org/x/sync/errgroup"
)

// CPUBenchmarkType represents a CPU benchmark.
type CPUBenchmarkType int

const (
	// CPUPrimeTest identifies a prime factoring CPU test.
	CPUPrimeTest CPUBenchmarkType = iota
)

// CPUOptions holds parameters for the test.
type CPUOptions struct {
	Concurrency int
	Duration    time.Duration

	Type CPUBenchmarkType
}
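
// A minimal usage sketch (illustrative only, not part of the upstream file):
// a caller, such as the systembench CLI plumbing in pkg/cli, is assumed to
// populate CPUOptions and hand them to RunCPU.
//
//	opts := CPUOptions{
//		Concurrency: 4,
//		Duration:    10 * time.Second,
//		Type:        CPUPrimeTest,
//	}
//	if err := RunCPU(opts); err != nil {
//		// handle the error
//	}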

// workerCPUPrimes holds a latency histogram.
type workerCPUPrimes struct {
	latency *namedHistogram
}

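// The worker interface is defined elsewhere in this package; judging from its
// use in this file, it presumably has the shape below (an assumption, shown
// only for readability):
//
//	type worker interface {
//		run(ctx context.Context) error
//		getLatencyHistogram() *namedHistogram
//	}
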
// run implements the worker interface. It checks consecutive integers for
// primality by trial division, counting one op per division performed and
// recording the latency of each batch.
func (w *workerCPUPrimes) run(ctx context.Context) error {
	dividend := 1
	// Work in batches so that we measure the divisions themselves rather than
	// the per-batch bookkeeping (the atomic counter update and the latency
	// recording).
	batchSize := 1000
	for {
		start := timeutil.Now()
		count := uint64(0)
		for i := 0; i < batchSize; i++ {
			limit := math.Sqrt(float64(dividend))
			// Trial division: try every divisor from 2 up to the square root
			// of the candidate. Starting at 1 would end the search
			// immediately, since 1 divides everything.
			divisor := 2
			for float64(divisor) <= limit {
				if ctx.Err() != nil {
					return ctx.Err()
				}
				remainder := dividend % divisor
				count++

				if remainder == 0 {
					// The candidate is composite; stop early.
					break
				}
				divisor++
			}
			// Advance to the next candidate whether or not this one was prime.
			dividend++
		}
		elapsed := timeutil.Since(start)
		atomic.AddUint64(&numOps, count)
		w.latency.Record(elapsed)
	}
}

// getLatencyHistogram implements the worker interface.
func (w *workerCPUPrimes) getLatencyHistogram() *namedHistogram {
	return w.latency
}

// newWorkerCPUPrimes creates a worker that verifies prime numbers by trial
// division of each candidate by every integer between 2 and its square root.
// If any divisor leaves a remainder of 0, the worker moves on to the next
// candidate.
func newWorkerCPUPrimes(
	ctx context.Context, cpuOptions *CPUOptions, registry *histogramRegistry,
) worker {
	w := &workerCPUPrimes{}
	// Register a single "ops" latency histogram for this worker.
	w.latency = registry.Register("ops")
	return w
}
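
// For reference, the per-candidate check performed by workerCPUPrimes is
// equivalent to this standalone sketch (illustrative only; isPrime is not
// part of this package):
//
//	func isPrime(n int) bool {
//		limit := math.Sqrt(float64(n))
//		for d := 2; float64(d) <= limit; d++ {
//			if n%d == 0 {
//				return false // e.g. 35 is rejected at d == 5
//			}
//		}
//		return n > 1 // e.g. 13 survives d == 2 and d == 3
//	}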

// RunCPU runs the CPU benchmark specified by cpuOptions.
func RunCPU(cpuOptions CPUOptions) error {
	ctx := context.Background()
	reg := newHistogramRegistry()

	workers := make([]worker, cpuOptions.Concurrency)
	var workerCreator func(ctx context.Context, cpuOptions *CPUOptions, registry *histogramRegistry) worker

	switch cpuOptions.Type {
	case CPUPrimeTest:
		workerCreator = newWorkerCPUPrimes
	default:
		return errors.Errorf("please specify a valid subtest")
	}

	for i := range workers {
		workers[i] = workerCreator(ctx, &cpuOptions, reg)
	}

	start := timeutil.Now()
	lastNow := start
	var lastOps uint64

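	// The runTest harness (defined elsewhere in this package) is assumed to
	// drive the closures below: init launches one goroutine per worker, tick
	// is invoked periodically to print incremental throughput and latency
	// quantiles, and done prints the cumulative summary once
	// cpuOptions.Duration has elapsed.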
	return runTest(ctx, test{
		init: func(g *errgroup.Group) {
			for i := range workers {
				// Capture the worker before launching the goroutine; the loop
				// variable is reused across iterations.
				w := workers[i]
				g.Go(func() error {
					return w.run(ctx)
				})
			}
		},

		tick: func(elapsed time.Duration, i int) {
			now := timeutil.Now()
			ops := atomic.LoadUint64(&numOps)
			elapsedSinceLastTick := now.Sub(lastNow)
			if i%20 == 0 {
				fmt.Println("_elapsed____ops/sec__p50(ms)__p95(ms)__p99(ms)_pMax(ms)")
			}
			reg.Tick(func(tick histogramTick) {
				// Latencies recorded since the previous tick.
				h := tick.Hist
				fmt.Printf("%8s %10s %8.1f %8.1f %8.1f %8.1f\n",
					time.Duration(timeutil.Since(start).Seconds()+0.5)*time.Second,
					humanize.Comma(int64(float64(ops-lastOps)/elapsedSinceLastTick.Seconds())),
					time.Duration(h.ValueAtQuantile(50)).Seconds()*1000,
					time.Duration(h.ValueAtQuantile(95)).Seconds()*1000,
					time.Duration(h.ValueAtQuantile(99)).Seconds()*1000,
					time.Duration(h.ValueAtQuantile(100)).Seconds()*1000,
				)
			})
			lastNow = now
			lastOps = ops
		},

		done: func(elapsed time.Duration) {
			startElapsed := timeutil.Since(start)
			const totalHeader = "\n_elapsed____ops(total)__avg(ms)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)"
			fmt.Println(totalHeader + `__total`)
			reg.Tick(func(tick histogramTick) {
				// Latencies accumulated over the entire run.
				h := tick.Cumulative
				fmt.Printf("%8s %13s %8.1f %8.1f %8.1f %8.1f %8.1f\n",
					time.Duration(startElapsed.Seconds())*time.Second,
					humanize.Comma(int64(atomic.LoadUint64(&numOps))),
					time.Duration(h.Mean()).Seconds()*1000,
					time.Duration(h.ValueAtQuantile(50)).Seconds()*1000,
					time.Duration(h.ValueAtQuantile(95)).Seconds()*1000,
					time.Duration(h.ValueAtQuantile(99)).Seconds()*1000,
					time.Duration(h.ValueAtQuantile(100)).Seconds()*1000)
			})
		},
	}, cpuOptions.Duration)
}