github.com/petermattis/pebble@v0.0.0-20190905164901-ab51a2166067/cmd/pebble/test.go

// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package main

import (
	"fmt"
	"log"
	"os"
	"os/signal"
	"runtime/pprof"
	"sort"
	"sync"
	"syscall"
	"time"

	"github.com/codahale/hdrhistogram"
	"github.com/petermattis/pebble"
)

const (
	minLatency = 10 * time.Microsecond
	maxLatency = 10 * time.Second
)

// startCPUProfile begins a CPU profile, writing it to cpu.prof, and returns a
// function that stops the profile and additionally writes a heap profile to
// heap.prof.
func startCPUProfile() func() {
	f, err := os.Create("cpu.prof")
	if err != nil {
		log.Fatal(err)
	}
	// runtime.SetBlockProfileRate(1)
	// runtime.SetMutexProfileFraction(1)

	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	return func() {
		pprof.StopCPUProfile()
		f.Close()

		if p := pprof.Lookup("heap"); p != nil {
			f, err := os.Create("heap.prof")
			if err != nil {
				log.Fatal(err)
			}
			if err := p.WriteTo(f, 0); err != nil {
				log.Fatal(err)
			}
			f.Close()
		}

		// if p := pprof.Lookup("mutex"); p != nil {
		// 	f, err := os.Create("mutex.prof")
		// 	if err != nil {
		// 		log.Fatal(err)
		// 	}
		// 	if err := p.WriteTo(f, 0); err != nil {
		// 		log.Fatal(err)
		// 	}
		// 	f.Close()
		// }

		// if p := pprof.Lookup("block"); p != nil {
		// 	f, err := os.Create("block.prof")
		// 	if err != nil {
		// 		log.Fatal(err)
		// 	}
		// 	if err := p.WriteTo(f, 0); err != nil {
		// 		log.Fatal(err)
		// 	}
		// 	f.Close()
		// }
	}
}

// newHistogram returns an HDR histogram covering [minLatency, maxLatency]
// with one significant figure of precision.
func newHistogram() *hdrhistogram.Histogram {
	return hdrhistogram.New(minLatency.Nanoseconds(), maxLatency.Nanoseconds(), 1)
}

// namedHistogram is a named, mutex-protected histogram of operation
// latencies.
type namedHistogram struct {
	name string
	mu   struct {
		sync.Mutex
		current *hdrhistogram.Histogram
	}
}

func newNamedHistogram(name string) *namedHistogram {
	w := &namedHistogram{name: name}
	w.mu.current = newHistogram()
	return w
}

// Record adds the elapsed duration to the histogram, clamping it to the
// configured [minLatency, maxLatency] range.
func (w *namedHistogram) Record(elapsed time.Duration) {
	if elapsed < minLatency {
		elapsed = minLatency
	} else if elapsed > maxLatency {
		elapsed = maxLatency
	}

	w.mu.Lock()
	err := w.mu.current.RecordValue(elapsed.Nanoseconds())
	w.mu.Unlock()

	if err != nil {
		// The histogram only rejects recorded values that fall outside its
		// configured range, and we clamp the latency to that range above, so
		// this code path should never be hit.
		panic(fmt.Sprintf(`%s: recording value: %s`, w.name, err))
	}
}

// tick passes the current histogram to fn and replaces it with a fresh one,
// so each tick observes only the latencies recorded since the previous tick.
func (w *namedHistogram) tick(fn func(h *hdrhistogram.Histogram)) {
	w.mu.Lock()
	defer w.mu.Unlock()
	h := w.mu.current
	w.mu.current = newHistogram()
	fn(h)
}

type histogramTick struct {
	// Name is the name given to the histograms represented by this tick.
	Name string
	// Hist is the merged result of the represented histograms for this tick.
	// Hist.TotalCount() is the number of operations that occurred for this tick.
	Hist *hdrhistogram.Histogram
	// Cumulative is the merged result of the represented histograms for all
	// time. Cumulative.TotalCount() is the total number of operations that have
	// occurred over all time.
	Cumulative *hdrhistogram.Histogram
	// Elapsed is the amount of time since the last tick.
	Elapsed time.Duration
	// Now is the time at which the tick was gathered. It covers the period
	// [Now-Elapsed,Now).
	Now time.Time
}

// histogramRegistry tracks a set of named histograms, merging them by name on
// each tick and maintaining cumulative totals across ticks.
type histogramRegistry struct {
	mu struct {
		sync.Mutex
		registered []*namedHistogram
	}

	start      time.Time
	cumulative map[string]*hdrhistogram.Histogram
	prevTick   map[string]time.Time
}

func newHistogramRegistry() *histogramRegistry {
	return &histogramRegistry{
		start:      time.Now(),
		cumulative: make(map[string]*hdrhistogram.Histogram),
		prevTick:   make(map[string]time.Time),
	}
}

// Register creates a new named histogram and adds it to the registry.
func (w *histogramRegistry) Register(name string) *namedHistogram {
	hist := newNamedHistogram(name)

	w.mu.Lock()
	w.mu.registered = append(w.mu.registered, hist)
	w.mu.Unlock()

	return hist
}

// Tick merges the registered histograms by name, folds them into the
// cumulative histograms, and invokes fn once per name in sorted order.
func (w *histogramRegistry) Tick(fn func(histogramTick)) {
	w.mu.Lock()
	registered := append([]*namedHistogram(nil), w.mu.registered...)
	w.mu.Unlock()

	merged := make(map[string]*hdrhistogram.Histogram)
	var names []string
	for _, hist := range registered {
		hist.tick(func(h *hdrhistogram.Histogram) {
			if p, ok := merged[hist.name]; ok {
				p.Merge(h)
			} else {
				merged[hist.name] = h
				names = append(names, hist.name)
			}
		})
	}

	now := time.Now()
	sort.Strings(names)
	for _, name := range names {
		mergedHist := merged[name]
		if _, ok := w.cumulative[name]; !ok {
			w.cumulative[name] = newHistogram()
		}
		w.cumulative[name].Merge(mergedHist)

		prevTick, ok := w.prevTick[name]
		if !ok {
			prevTick = w.start
		}
		w.prevTick[name] = now
		fn(histogramTick{
			Name:       name,
			Hist:       merged[name],
			Cumulative: w.cumulative[name],
			Elapsed:    now.Sub(prevTick),
			Now:        now,
		})
	}
}

// test describes a benchmark workload: init starts the workers, tick is
// invoked once per second while they run, and done is invoked when they
// finish.
type test struct {
	init func(db DB, wg *sync.WaitGroup)
	tick func(elapsed time.Duration, i int)
	done func(elapsed time.Duration)
}

// runTest runs the given test against a database in dir, ticking once per
// second until the workers finish, the configured duration elapses, or an
// interrupt is received.
func runTest(dir string, t test) {
	// Wipe the output directory if requested.
	if wipe {
		fmt.Printf("wiping %s\n", dir)
		if err := os.RemoveAll(dir); err != nil {
			log.Fatal(err)
		}
	}

	fmt.Printf("dir %s\nconcurrency %d\n", dir, concurrency)

	var db DB
	if rocksdb {
		db = newRocksDB(dir)
	} else {
		db = newPebbleDB(dir)
	}

	var wg sync.WaitGroup
	t.init(db, &wg)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	done := make(chan os.Signal, 3)
	workersDone := make(chan struct{})
	signal.Notify(done, os.Interrupt)

	go func() {
		wg.Wait()
		close(workersDone)
	}()

	if duration > 0 {
		go func() {
			time.Sleep(duration)
			done <- syscall.Signal(0)
		}()
	}

	stopProf := startCPUProfile()
	defer stopProf()

	backgroundCompactions := func(p *pebble.VersionMetrics) bool {
		// The last level never gets selected as an input level for compaction,
		// only as an output level, so ignore it for the purposes of determining
		// if background compactions are still needed.
		for i := range p.Levels[:len(p.Levels)-1] {
			if p.Levels[i].Score > 1 {
				return true
			}
		}
		return false
	}

	start := time.Now()
	for i := 0; ; i++ {
		select {
		case <-ticker.C:
			if workersDone != nil {
				t.tick(time.Since(start), i)
				if verbose && (i%10) == 9 {
					fmt.Printf("%s", db.Metrics())
				}
			} else if waitCompactions {
				p := db.Metrics()
				fmt.Printf("%s", p)
				if !backgroundCompactions(p) {
					return
				}
			}

		case <-workersDone:
			workersDone = nil
			t.done(time.Since(start))
			p := db.Metrics()
			fmt.Printf("%s", p)
			if !waitCompactions || !backgroundCompactions(p) {
				return
			}
			fmt.Printf("waiting for background compactions\n")

		case <-done:
			if workersDone != nil {
				t.done(time.Since(start))
			}
			fmt.Printf("%s", db.Metrics())
			return
		}
	}
}
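
// exampleTickLoop is an illustrative sketch, not part of the original file,
// showing how a caller might wire a histogramRegistry into a test's tick
// function. The "read" operation name and the single recorded latency are
// hypothetical; real workloads register one histogram per operation type and
// record from many worker goroutines.
func exampleTickLoop() {
	reg := newHistogramRegistry()
	readHist := reg.Register("read")

	// A worker would time each operation and record the elapsed latency.
	start := time.Now()
	// ... perform a read here ...
	readHist.Record(time.Since(start))

	// Once per second, report per-tick throughput and latency percentiles.
	// Hist covers only this tick; Cumulative covers the whole run.
	reg.Tick(func(tick histogramTick) {
		fmt.Printf("%s: %d ops in %s (p50=%s p99=%s)\n",
			tick.Name, tick.Hist.TotalCount(), tick.Elapsed,
			time.Duration(tick.Hist.ValueAtQuantile(50)),
			time.Duration(tick.Hist.ValueAtQuantile(99)))
	})
}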