github.com/cockroachdb/pebble@v1.1.2/cmd/pebble/test.go

// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package main

import (
	"fmt"
	"io"
	"log"
	"os"
	"os/signal"
	"runtime"
	"runtime/pprof"
	"sort"
	"sync"
	"syscall"
	"time"

	"github.com/HdrHistogram/hdrhistogram-go"
	"github.com/cockroachdb/pebble"
)

const (
	minLatency = 10 * time.Microsecond
	maxLatency = 10 * time.Second
)

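// startCPUProfile enables mutex profiling and starts recording a rotating set
// of CPU profiles (a new cpu.NNNN.prof file every 10 seconds). The returned
// function stops CPU profiling and additionally writes heap and mutex profiles
// to heap.prof and mutex.prof.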
func startCPUProfile() func() {
	runtime.SetMutexProfileFraction(1000)

	done := startRecording("cpu.%04d.prof", pprof.StartCPUProfile, pprof.StopCPUProfile)
	return func() {
		done()
		if p := pprof.Lookup("heap"); p != nil {
			f, err := os.Create("heap.prof")
			if err != nil {
				log.Fatal(err)
			}
			if err := p.WriteTo(f, 0); err != nil {
				log.Fatal(err)
			}
			f.Close()
		}
		if p := pprof.Lookup("mutex"); p != nil {
			f, err := os.Create("mutex.prof")
			if err != nil {
				log.Fatal(err)
			}
			if err := p.WriteTo(f, 0); err != nil {
				log.Fatal(err)
			}
			f.Close()
		}
	}
}

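// startRecording starts a background goroutine that rotates the recorded
// profile every 10 seconds, creating files named by fmtStr from the number of
// seconds elapsed since the start. The returned function stops recording and
// waits for the goroutine to finish.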
func startRecording(fmtStr string, startFunc func(io.Writer) error, stopFunc func()) func() {
	doneCh := make(chan struct{})
	var doneWG sync.WaitGroup
	doneWG.Add(1)

	go func() {
		defer doneWG.Done()

		start := time.Now()
		t := time.NewTicker(10 * time.Second)
		defer t.Stop()

		var current *os.File
		defer func() {
			if current != nil {
				stopFunc()
				current.Close()
			}
		}()

		for {
			if current != nil {
				stopFunc()
				current.Close()
				current = nil
			}
			path := fmt.Sprintf(fmtStr, int(time.Since(start).Seconds()+0.5))
			f, err := os.Create(path)
			if err != nil {
				log.Fatalf("unable to create cpu profile: %s", err)
				return
			}
			if err := startFunc(f); err != nil {
				log.Fatalf("unable to start cpu profile: %v", err)
				f.Close()
				return
			}
			current = f

			select {
			case <-doneCh:
				return
			case <-t.C:
			}
		}
	}()

	return func() {
		close(doneCh)
		doneWG.Wait()
	}
}

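// newHistogram returns an HDR histogram covering the [minLatency, maxLatency]
// range with one significant figure of precision.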
func newHistogram() *hdrhistogram.Histogram {
	return hdrhistogram.New(minLatency.Nanoseconds(), maxLatency.Nanoseconds(), 1)
}

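// namedHistogram is a latency histogram with a name, protected by a mutex so
// it can be recorded to concurrently. The current histogram is swapped out on
// each tick.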
type namedHistogram struct {
	name string
	mu   struct {
		sync.Mutex
		current *hdrhistogram.Histogram
	}
}

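// newNamedHistogram returns a namedHistogram with the given name and an empty
// current histogram.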
func newNamedHistogram(name string) *namedHistogram {
	w := &namedHistogram{name: name}
	w.mu.current = newHistogram()
	return w
}

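// Record clamps elapsed to the [minLatency, maxLatency] range and records it
// in the current histogram.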
func (w *namedHistogram) Record(elapsed time.Duration) {
	if elapsed < minLatency {
		elapsed = minLatency
	} else if elapsed > maxLatency {
		elapsed = maxLatency
	}

	w.mu.Lock()
	err := w.mu.current.RecordValue(elapsed.Nanoseconds())
	w.mu.Unlock()

	if err != nil {
		// A histogram only rejects recorded values that are out of range, and we
		// clamp the latency to the configured range above, so this code path
		// should never be hit.
		panic(fmt.Sprintf(`%s: recording value: %s`, w.name, err))
	}
}

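// tick replaces the current histogram with a fresh one and passes the previous
// histogram to fn.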
func (w *namedHistogram) tick(fn func(h *hdrhistogram.Histogram)) {
	w.mu.Lock()
	defer w.mu.Unlock()
	h := w.mu.current
	w.mu.current = newHistogram()
	fn(h)
}

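// histogramTick summarizes the histograms merged for a single name during one
// registry tick.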
type histogramTick struct {
	// Name is the name given to the histograms represented by this tick.
	Name string
	// Hist is the merged result of the represented histograms for this tick.
	// Hist.TotalCount() is the number of operations that occurred for this tick.
	Hist *hdrhistogram.Histogram
	// Cumulative is the merged result of the represented histograms for all
	// time. Cumulative.TotalCount() is the total number of operations that have
	// occurred over all time.
	Cumulative *hdrhistogram.Histogram
	// Elapsed is the amount of time since the last tick.
	Elapsed time.Duration
	// Now is the time at which the tick was gathered. It covers the period
	// [Now-Elapsed,Now).
	Now time.Time
}

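// histogramRegistry tracks the registered namedHistograms and accumulates
// their per-tick results into cumulative histograms, keyed by name.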
type histogramRegistry struct {
	mu struct {
		sync.Mutex
		registered []*namedHistogram
	}

	start      time.Time
	cumulative map[string]*hdrhistogram.Histogram
	prevTick   map[string]time.Time
}

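// newHistogramRegistry returns an empty registry whose start time is the
// current time.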
func newHistogramRegistry() *histogramRegistry {
	return &histogramRegistry{
		start:      time.Now(),
		cumulative: make(map[string]*hdrhistogram.Histogram),
		prevTick:   make(map[string]time.Time),
	}
}

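// Register creates a namedHistogram with the given name and adds it to the
// registry.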
func (w *histogramRegistry) Register(name string) *namedHistogram {
	hist := newNamedHistogram(name)

	w.mu.Lock()
	w.mu.registered = append(w.mu.registered, hist)
	w.mu.Unlock()

	return hist
}

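// Tick collects the current histograms from all registered namedHistograms,
// merges them by name, folds the results into the cumulative histograms, and
// invokes fn once per name with a histogramTick describing the interval since
// the previous tick.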
func (w *histogramRegistry) Tick(fn func(histogramTick)) {
	w.mu.Lock()
	registered := append([]*namedHistogram(nil), w.mu.registered...)
	w.mu.Unlock()

	merged := make(map[string]*hdrhistogram.Histogram)
	var names []string
	for _, hist := range registered {
		hist.tick(func(h *hdrhistogram.Histogram) {
			if p, ok := merged[hist.name]; ok {
				p.Merge(h)
			} else {
				merged[hist.name] = h
				names = append(names, hist.name)
			}
		})
	}

	now := time.Now()
	sort.Strings(names)
	for _, name := range names {
		mergedHist := merged[name]
		if _, ok := w.cumulative[name]; !ok {
			w.cumulative[name] = newHistogram()
		}
		w.cumulative[name].Merge(mergedHist)

		prevTick, ok := w.prevTick[name]
		if !ok {
			prevTick = w.start
		}
		w.prevTick[name] = now
		fn(histogramTick{
			Name:       name,
			Hist:       merged[name],
			Cumulative: w.cumulative[name],
			Elapsed:    now.Sub(prevTick),
			Now:        now,
		})
	}
}

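// testWithoutDB describes a benchmark that runs without opening a Pebble
// database: init starts the worker goroutines, tick is invoked once per second
// to report progress, and done is invoked when the workers finish or the run
// is interrupted.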
type testWithoutDB struct {
	init func(wg *sync.WaitGroup)
	tick func(elapsed time.Duration, i int)
	done func(wg *sync.WaitGroup, elapsed time.Duration)
}

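// runTestWithoutDB runs t, ticking once per second until the workers finish,
// the configured duration elapses, or an interrupt signal is received. CPU
// profiling is enabled for the length of the run.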
func runTestWithoutDB(t testWithoutDB) {
	var wg sync.WaitGroup
	t.init(&wg)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	done := make(chan os.Signal, 3)
	workersDone := make(chan struct{})
	signal.Notify(done, os.Interrupt)

	go func() {
		wg.Wait()
		close(workersDone)
	}()

	if duration > 0 {
		go func() {
			time.Sleep(duration)
			done <- syscall.Signal(0)
		}()
	}

	stopProf := startCPUProfile()
	defer stopProf()

	start := time.Now()
	for i := 0; ; i++ {
		select {
		case <-ticker.C:
			if workersDone != nil {
				t.tick(time.Since(start), i)
			}

		case <-workersDone:
			workersDone = nil
			t.done(&wg, time.Since(start))
			return

		case sig := <-done:
			fmt.Println("operating system is killing the op.", sig)
			if workersDone != nil {
				t.done(&wg, time.Since(start))
			}
			return
		}
	}
}

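// test describes a benchmark that runs against a Pebble database: init starts
// the worker goroutines, tick is invoked once per second to report progress,
// and done is invoked when the workers finish or the run is interrupted.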
type test struct {
	init func(db DB, wg *sync.WaitGroup)
	tick func(elapsed time.Duration, i int)
	done func(elapsed time.Duration)
}

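// runTest opens the database in dir (optionally wiping it first), runs t, and
// ticks once per second until the workers finish, the configured duration
// elapses, the database grows past the configured max size, or an interrupt
// signal is received. If waitCompactions is set, it also waits for background
// compactions to quiesce before returning.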
func runTest(dir string, t test) {
	// Wipe the database directory if requested.
	if wipe {
		fmt.Printf("wiping %s\n", dir)
		if err := os.RemoveAll(dir); err != nil {
			log.Fatal(err)
		}
	}

	fmt.Printf("dir %s\nconcurrency %d\n", dir, concurrency)

	db := newPebbleDB(dir)
	var wg sync.WaitGroup
	t.init(db, &wg)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	done := make(chan os.Signal, 3)
	workersDone := make(chan struct{})
	signal.Notify(done, os.Interrupt)

	go func() {
		wg.Wait()
		close(workersDone)
	}()

	if maxSize > 0 {
		go func() {
			for {
				time.Sleep(10 * time.Second)
				if db.Metrics().DiskSpaceUsage() > maxSize*1e6 {
					fmt.Println("max size reached")
					done <- syscall.Signal(0)
				}
			}
		}()
	}
	if duration > 0 {
		go func() {
			time.Sleep(duration)
			done <- syscall.Signal(0)
		}()
	}

	stopProf := startCPUProfile()
	defer stopProf()

	backgroundCompactions := func(p *pebble.Metrics) bool {
		// The last level never gets selected as an input level for compaction,
		// only as an output level, so ignore it for the purposes of determining if
		// background compactions are still needed.
		for i := range p.Levels[:len(p.Levels)-1] {
			if p.Levels[i].Score > 1 {
				return true
			}
		}
		return false
	}

	start := time.Now()
	for i := 0; ; i++ {
		select {
		case <-ticker.C:
			if workersDone != nil {
				t.tick(time.Since(start), i)
				if verbose && (i%10) == 9 {
					fmt.Printf("%s", db.Metrics())
				}
			} else if waitCompactions {
				p := db.Metrics()
				fmt.Printf("%s", p)
				if !backgroundCompactions(p) {
					return
				}
			}

		case <-workersDone:
			workersDone = nil
			t.done(time.Since(start))
			p := db.Metrics()
			fmt.Printf("%s", p)
			if !waitCompactions || !backgroundCompactions(p) {
				return
			}
			fmt.Printf("waiting for background compactions\n")

		case <-done:
			if workersDone != nil {
				t.done(time.Since(start))
			}
			fmt.Printf("%s", db.Metrics())
			return
		}
	}
}