github.com/cockroachdb/pebble@v0.0.0-20231214172447-ab4952c5f87b/metrics.go

// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"fmt"
	"math"
	"time"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/cache"
	"github.com/cockroachdb/pebble/internal/humanize"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/redact"
	"github.com/prometheus/client_golang/prometheus"
)

// CacheMetrics holds metrics for the block and table cache.
type CacheMetrics = cache.Metrics

// FilterMetrics holds metrics for the filter policy.
type FilterMetrics = sstable.FilterMetrics

// ThroughputMetric is a cumulative throughput metric. See the detailed
// comment in base.
type ThroughputMetric = base.ThroughputMetric

// SecondaryCacheMetrics holds metrics for the persistent secondary cache
// that caches commonly accessed blocks from blob storage on a local
// file system.
type SecondaryCacheMetrics = sharedcache.Metrics

// LevelMetrics holds per-level metrics such as the number of files and total
// size of the files, and compaction-related metrics.
type LevelMetrics struct {
	// The number of sublevels within the level. The sublevel count corresponds
	// to the read amplification for the level. An empty level will have a
	// sublevel count of 0, implying no read amplification. Only L0 will have
	// a sublevel count other than 0 or 1.
	Sublevels int32
	// The total number of files in the level.
	NumFiles int64
	// The total number of virtual sstables in the level.
	NumVirtualFiles uint64
	// The total size in bytes of the files in the level.
	Size int64
	// The total size of the virtual sstables in the level.
	VirtualSize uint64
	// The level's compaction score. This is the compensatedScoreRatio in the
	// candidateLevelInfo.
	Score float64
	// The number of incoming bytes from other levels read during
	// compactions. This excludes bytes moved and bytes ingested. For L0 this is
	// the bytes written to the WAL.
	BytesIn uint64
	// The number of bytes ingested. The sibling metric for tables is
	// TablesIngested.
	BytesIngested uint64
	// The number of bytes moved into the level by a "move" compaction. The
	// sibling metric for tables is TablesMoved.
	BytesMoved uint64
	// The number of bytes read for compactions at the level. This includes bytes
	// read from other levels (BytesIn), as well as bytes read for the level.
	BytesRead uint64
	// The number of bytes written during compactions. The sibling
	// metric for tables is TablesCompacted. This metric may be summed
	// with BytesFlushed to compute the total bytes written for the level.
	BytesCompacted uint64
	// The number of bytes written during flushes. The sibling
	// metric for tables is TablesFlushed. This metric is always
	// zero for all levels other than L0.
	BytesFlushed uint64
	// The number of sstables compacted to this level.
	TablesCompacted uint64
	// The number of sstables flushed to this level.
	TablesFlushed uint64
	// The number of sstables ingested into the level.
	TablesIngested uint64
	// The number of sstables moved to this level by a "move" compaction.
	TablesMoved uint64

	MultiLevel struct {
		// BytesInTop are the total bytes in a multilevel compaction coming
		// from the top level.
		BytesInTop uint64

		// BytesIn, exclusively for multilevel compactions.
		BytesIn uint64

		// BytesRead, exclusively for multilevel compactions.
		BytesRead uint64
	}

	// Additional contains misc additional metrics that are not always printed.
	Additional struct {
		// The sum of Properties.ValueBlocksSize for all the sstables in this
		// level. Printed by LevelMetrics.format iff there is at least one level
		// with a non-zero value.
		ValueBlocksSize uint64
		// Cumulative metrics about bytes written to data blocks and value blocks,
		// via compactions (except move compactions) or flushes. Not printed by
		// LevelMetrics.format, but are available to sophisticated clients.
		BytesWrittenDataBlocks  uint64
		BytesWrittenValueBlocks uint64
	}
}

// Add updates the counter metrics for the level.
func (m *LevelMetrics) Add(u *LevelMetrics) {
	m.NumFiles += u.NumFiles
	m.NumVirtualFiles += u.NumVirtualFiles
	m.VirtualSize += u.VirtualSize
	m.Size += u.Size
	m.BytesIn += u.BytesIn
	m.BytesIngested += u.BytesIngested
	m.BytesMoved += u.BytesMoved
	m.BytesRead += u.BytesRead
	m.BytesCompacted += u.BytesCompacted
	m.BytesFlushed += u.BytesFlushed
	m.TablesCompacted += u.TablesCompacted
	m.TablesFlushed += u.TablesFlushed
	m.TablesIngested += u.TablesIngested
	m.TablesMoved += u.TablesMoved
	m.MultiLevel.BytesInTop += u.MultiLevel.BytesInTop
	m.MultiLevel.BytesRead += u.MultiLevel.BytesRead
	m.MultiLevel.BytesIn += u.MultiLevel.BytesIn
	m.Additional.BytesWrittenDataBlocks += u.Additional.BytesWrittenDataBlocks
	m.Additional.BytesWrittenValueBlocks += u.Additional.BytesWrittenValueBlocks
	m.Additional.ValueBlocksSize += u.Additional.ValueBlocksSize
}

// WriteAmp computes the write amplification for compactions at this
// level. Computed as (BytesFlushed + BytesCompacted) / BytesIn.
func (m *LevelMetrics) WriteAmp() float64 {
	if m.BytesIn == 0 {
		return 0
	}
	return float64(m.BytesFlushed+m.BytesCompacted) / float64(m.BytesIn)
}
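
// An illustrative sketch (not part of the upstream file) of reading per-level
// write amplification from a metrics snapshot; it assumes db is an open *DB:
//
//	m := db.Metrics()
//	for level := range m.Levels {
//		fmt.Printf("L%d: w-amp %.1f\n", level, m.Levels[level].WriteAmp())
//	}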

// Metrics holds metrics for various subsystems of the DB such as the Cache,
// Compactions, WAL, and per-Level metrics.
//
// TODO(peter): The testing of these metrics is relatively weak. There should
// be testing that performs various operations on a DB and verifies that the
// metrics reflect those operations.
type Metrics struct {
	BlockCache CacheMetrics

	Compact struct {
		// The total number of compactions, and per-compaction type counts.
		Count             int64
		DefaultCount      int64
		DeleteOnlyCount   int64
		ElisionOnlyCount  int64
		MoveCount         int64
		ReadCount         int64
		RewriteCount      int64
		MultiLevelCount   int64
		CounterLevelCount int64
		// An estimate of the number of bytes that need to be compacted for the LSM
		// to reach a stable state.
		EstimatedDebt uint64
		// Number of bytes present in sstables being written by in-progress
		// compactions. This value will be zero if there are no in-progress
		// compactions.
		InProgressBytes int64
		// Number of compactions that are in-progress.
		NumInProgress int64
		// MarkedFiles is a count of files that are marked for
		// compaction. Such files are compacted in a rewrite compaction
		// when no other compactions are picked.
		MarkedFiles int
		// Duration records the cumulative duration of all compactions since the
		// database was opened.
		Duration time.Duration
	}

	Ingest struct {
		// The total number of ingestions.
		Count uint64
	}

	Flush struct {
		// The total number of flushes.
		Count           int64
		WriteThroughput ThroughputMetric
		// Number of flushes that are in-progress. In the current implementation
		// this will always be zero or one.
		NumInProgress int64
		// AsIngestCount is a monotonically increasing counter of flush operations
		// handling ingested tables.
		AsIngestCount uint64
		// AsIngestTableCount is a monotonically increasing counter of tables
		// ingested as flushables.
		AsIngestTableCount uint64
		// AsIngestBytes is a monotonically increasing counter of the bytes flushed
		// for flushables that originated as ingestion operations.
		AsIngestBytes uint64
	}

	Filter FilterMetrics

	Levels [numLevels]LevelMetrics

	MemTable struct {
		// The number of bytes allocated by memtables and large (flushable)
		// batches.
		Size uint64
		// The count of memtables.
		Count int64
		// The number of bytes present in zombie memtables which are no longer
		// referenced by the current DB state. An unbounded number of memtables
		// may be zombie if they're still in use by an iterator. One additional
		// memtable may be zombie if it's no longer in use and waiting to be
		// recycled.
		ZombieSize uint64
		// The count of zombie memtables.
		ZombieCount int64
	}

	Keys struct {
		// The approximate count of internal range key set keys in the database.
		RangeKeySetsCount uint64
		// The approximate count of internal tombstones (DEL, SINGLEDEL and
		// RANGEDEL key kinds) within the database.
		TombstoneCount uint64
		// A cumulative total number of missized DELSIZED keys encountered by
		// compactions since the database was opened.
		MissizedTombstonesCount uint64
	}

	Snapshots struct {
		// The number of currently open snapshots.
		Count int
		// The sequence number of the earliest, currently open snapshot.
		EarliestSeqNum uint64
		// A running tally of keys written to sstables during flushes or
		// compactions that would've been elided if it weren't for open
		// snapshots.
		PinnedKeys uint64
		// A running cumulative sum of the size of keys and values written to
		// sstables during flushes or compactions that would've been elided if
		// it weren't for open snapshots.
		PinnedSize uint64
	}

	Table struct {
		// The number of bytes present in obsolete tables which are no longer
		// referenced by the current DB state or any open iterators.
		ObsoleteSize uint64
		// The count of obsolete tables.
		ObsoleteCount int64
		// The number of bytes present in zombie tables which are no longer
		// referenced by the current DB state but are still in use by an iterator.
		ZombieSize uint64
		// The count of zombie tables.
		ZombieCount int64
		// The count of the backing sstables.
		BackingTableCount uint64
		// The sum of the sizes of all of the backing sstables.
		BackingTableSize uint64
	}

	TableCache CacheMetrics

	// Count of the number of open sstable iterators.
	TableIters int64
	// Uptime is the total time since this DB was opened.
	Uptime time.Duration

	WAL struct {
		// Number of live WAL files.
		Files int64
		// Number of obsolete WAL files.
		ObsoleteFiles int64
		// Physical size of the obsolete WAL files.
		ObsoletePhysicalSize uint64
		// Size of the live data in the WAL files. Note that with WAL file
		// recycling this is less than the actual on-disk size of the WAL files.
		Size uint64
		// Physical size of the WAL files on-disk. With WAL file recycling,
		// this is greater than the live data in WAL files.
		PhysicalSize uint64
		// Number of logical bytes written to the WAL.
		BytesIn uint64
		// Number of bytes written to the WAL.
		BytesWritten uint64
	}

	LogWriter struct {
		FsyncLatency prometheus.Histogram
		record.LogWriterMetrics
	}

	CategoryStats []sstable.CategoryStatsAggregate

	SecondaryCacheMetrics SecondaryCacheMetrics

	private struct {
		optionsFileSize  uint64
		manifestFileSize uint64
	}
}
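
// An illustrative sketch (not part of the upstream file) of consuming a
// snapshot of these metrics; db.Metrics() is assumed to be called on an
// open *DB:
//
//	m := db.Metrics()
//	fmt.Printf("compactions: %d (estimated debt: %d bytes) flushes: %d\n",
//		m.Compact.Count, m.Compact.EstimatedDebt, m.Flush.Count)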

var (
	// FsyncLatencyBuckets are prometheus histogram buckets suitable for a
	// histogram that records latencies for fsyncs.
	FsyncLatencyBuckets = append(
		prometheus.LinearBuckets(0.0, float64(time.Microsecond*100), 50),
		prometheus.ExponentialBucketsRange(float64(time.Millisecond*5), float64(10*time.Second), 50)...,
	)

	// SecondaryCacheIOBuckets is exported so that clients of package pebble
	// (e.g. CRDB) can export metrics that use these buckets.
	SecondaryCacheIOBuckets = sharedcache.IOBuckets
	// SecondaryCacheChannelWriteBuckets is exported so that clients of package
	// pebble (e.g. CRDB) can export metrics that use these buckets.
	SecondaryCacheChannelWriteBuckets = sharedcache.ChannelWriteBuckets
)
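
// A minimal sketch (not part of the upstream file) of constructing a
// Prometheus histogram with these buckets. The metric name is hypothetical;
// observations are float64 nanoseconds (float64(time.Duration)), matching the
// bucket units above:
//
//	fsyncHist := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "pebble_wal_fsync_latency", // hypothetical name
//		Buckets: FsyncLatencyBuckets,
//	})
//	fsyncHist.Observe(float64(250 * time.Microsecond))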

// DiskSpaceUsage returns the total disk space used by the database in bytes,
// including live and obsolete files.
func (m *Metrics) DiskSpaceUsage() uint64 {
	var usageBytes uint64
	usageBytes += m.WAL.PhysicalSize
	usageBytes += m.WAL.ObsoletePhysicalSize
	for _, lm := range m.Levels {
		usageBytes += uint64(lm.Size)
	}
	usageBytes += m.Table.ObsoleteSize
	usageBytes += m.Table.ZombieSize
	usageBytes += m.private.optionsFileSize
	usageBytes += m.private.manifestFileSize
	usageBytes += uint64(m.Compact.InProgressBytes)
	return usageBytes
}

// NumVirtual is the number of virtual sstables in the latest version
// summed over every level in the LSM.
func (m *Metrics) NumVirtual() uint64 {
	var n uint64
	for _, level := range m.Levels {
		n += level.NumVirtualFiles
	}
	return n
}

// VirtualSize is the sum of the sizes of the virtual sstables in the
// latest version. BackingTableSize - VirtualSize gives an estimate for
// the space amplification caused by not compacting virtual sstables.
func (m *Metrics) VirtualSize() uint64 {
	var size uint64
	for _, level := range m.Levels {
		size += level.VirtualSize
	}
	return size
}

// ReadAmp returns the current read amplification of the database.
// It's computed as the number of sublevels in L0 + the number of non-empty
// levels below L0.
func (m *Metrics) ReadAmp() int {
	var ramp int32
	for _, l := range m.Levels {
		ramp += l.Sublevels
	}
	return int(ramp)
}

// Total returns the sum of the per-level metrics and WAL metrics.
func (m *Metrics) Total() LevelMetrics {
	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	return total
}
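
// Total is the basis for the overall write amplification of the LSM: the
// levels are summed, and WAL bytes written plus ingested bytes are treated as
// the total bytes-in. An illustrative sketch (not part of the upstream file),
// assuming db is an open *DB:
//
//	m := db.Metrics()
//	total := m.Total()
//	fmt.Printf("w-amp: %.1f r-amp: %d disk: %d bytes\n",
//		total.WriteAmp(), m.ReadAmp(), m.DiskSpaceUsage())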

// String pretty-prints the metrics as below:
//
//	      |                             |       |       |   ingested   |     moved    |    written   |       |    amp
//	level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w
//	------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------
//	    0 |   101   102B     0B       0 | 103.0 |  104B |   112   104B |   113   106B |   221   217B |  107B |   1  2.1
//	    1 |   201   202B     0B       0 | 203.0 |  204B |   212   204B |   213   206B |   421   417B |  207B |   2  2.0
//	    2 |   301   302B     0B       0 | 303.0 |  304B |   312   304B |   313   306B |   621   617B |  307B |   3  2.0
//	    3 |   401   402B     0B       0 | 403.0 |  404B |   412   404B |   413   406B |   821   817B |  407B |   4  2.0
//	    4 |   501   502B     0B       0 | 503.0 |  504B |   512   504B |   513   506B |  1.0K  1017B |  507B |   5  2.0
//	    5 |   601   602B     0B       0 | 603.0 |  604B |   612   604B |   613   606B |  1.2K  1.2KB |  607B |   6  2.0
//	    6 |   701   702B     0B       0 |     - |  704B |   712   704B |   713   706B |  1.4K  1.4KB |  707B |   7  2.0
//	total |  2.8K  2.7KB     0B       0 |     - | 2.8KB |  2.9K  2.8KB |  2.9K  2.8KB |  5.7K  8.4KB | 2.8KB |  28  3.0
//	-------------------------------------------------------------------------------------------------------------------
//	WAL: 22 files (24B) in: 25B written: 26B (4% overhead)
//	Flushes: 8
//	Compactions: 5 estimated debt: 6B in progress: 2 (7B)
//	 default: 27 delete: 28 elision: 29 move: 30 read: 31 rewrite: 32 multi-level: 33
//	MemTables: 12 (11B) zombie: 14 (13B)
//	Zombie tables: 16 (15B)
//	Backing tables: 0 (0B)
//	Block cache: 2 entries (1B) hit rate: 42.9%
//	Table cache: 18 entries (17B) hit rate: 48.7%
//	Secondary cache: 40 entries (40B) hit rate: 49.9%
//	Snapshots: 4 earliest seq num: 1024
//	Table iters: 21
//	Filter utility: 47.4%
//	Ingestions: 27 as flushable: 36 (34B in 35 tables)
func (m *Metrics) String() string {
	return redact.StringWithoutMarkers(m)
}
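
// Since Metrics implements fmt.Stringer via String, a snapshot can be dumped
// directly; an illustrative sketch (not part of the upstream file), assuming
// db is an open *DB:
//
//	fmt.Println(db.Metrics())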

var _ redact.SafeFormatter = &Metrics{}

// SafeFormat implements redact.SafeFormatter.
func (m *Metrics) SafeFormat(w redact.SafePrinter, _ rune) {
	// NB: Pebble does not make any assumptions as to which Go primitive types
	// have been registered as safe with redact.RegisterSafeType and does not
	// register any types itself. Some of the calls to `redact.Safe`, etc are
	// superfluous in the context of CockroachDB, which registers all the Go
	// numeric types as safe.

	// TODO(jackson): There are a few places where we use redact.SafeValue
	// instead of redact.RedactableString. This is necessary because of a bug
	// whereby formatting a redact.RedactableString argument does not respect
	// width specifiers. When the issue is fixed, we can convert these to
	// RedactableStrings. https://github.com/cockroachdb/redact/issues/17

	multiExists := m.Compact.MultiLevelCount > 0
	appendIfMulti := func(line redact.SafeString) {
		if multiExists {
			w.SafeString(line)
		}
	}
	newline := func() {
		w.SafeString("\n")
	}

	w.SafeString("      |                             |       |       |   ingested   |     moved    |    written   |       |    amp")
	appendIfMulti(" |    multilevel")
	newline()
	w.SafeString("level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w")
	appendIfMulti(" |   top    in  read")
	newline()
	w.SafeString("------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------")
	appendIfMulti("-+------------------")
	newline()

	// formatRow prints out a row of the table.
	formatRow := func(m *LevelMetrics, score float64) {
		scoreStr := "-"
		if !math.IsNaN(score) {
			// Try to keep the string no longer than 5 characters.
			switch {
			case score < 99.995:
				scoreStr = fmt.Sprintf("%.2f", score)
			case score < 999.95:
				scoreStr = fmt.Sprintf("%.1f", score)
			default:
				scoreStr = fmt.Sprintf("%.0f", score)
			}
		}
		var wampStr string
		if wamp := m.WriteAmp(); wamp > 99.5 {
			wampStr = fmt.Sprintf("%.0f", wamp)
		} else {
			wampStr = fmt.Sprintf("%.1f", wamp)
		}

		w.Printf("| %5s %6s %6s %7s | %5s | %5s | %5s %6s | %5s %6s | %5s %6s | %5s | %3d %4s",
			humanize.Count.Int64(m.NumFiles),
			humanize.Bytes.Int64(m.Size),
			humanize.Bytes.Uint64(m.Additional.ValueBlocksSize),
			humanize.Count.Uint64(m.NumVirtualFiles),
			redact.Safe(scoreStr),
			humanize.Bytes.Uint64(m.BytesIn),
			humanize.Count.Uint64(m.TablesIngested),
			humanize.Bytes.Uint64(m.BytesIngested),
			humanize.Count.Uint64(m.TablesMoved),
			humanize.Bytes.Uint64(m.BytesMoved),
			humanize.Count.Uint64(m.TablesFlushed+m.TablesCompacted),
			humanize.Bytes.Uint64(m.BytesFlushed+m.BytesCompacted),
			humanize.Bytes.Uint64(m.BytesRead),
			redact.Safe(m.Sublevels),
			redact.Safe(wampStr))

		if multiExists {
			w.Printf(" | %5s %5s %5s",
				humanize.Bytes.Uint64(m.MultiLevel.BytesInTop),
				humanize.Bytes.Uint64(m.MultiLevel.BytesIn),
				humanize.Bytes.Uint64(m.MultiLevel.BytesRead))
		}
		newline()
	}

	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		w.Printf("%5d ", redact.Safe(level))

		// Format the score.
		score := math.NaN()
		if level < numLevels-1 {
			score = l.Score
		}
		formatRow(l, score)
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	w.SafeString("total ")
	formatRow(&total, math.NaN())

	w.SafeString("-------------------------------------------------------------------------------------------------------------------")
	appendIfMulti("--------------------")
	newline()
	w.Printf("WAL: %d files (%s) in: %s written: %s (%.0f%% overhead)\n",
		redact.Safe(m.WAL.Files),
		humanize.Bytes.Uint64(m.WAL.Size),
		humanize.Bytes.Uint64(m.WAL.BytesIn),
		humanize.Bytes.Uint64(m.WAL.BytesWritten),
		redact.Safe(percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))))

	w.Printf("Flushes: %d\n", redact.Safe(m.Flush.Count))

	w.Printf("Compactions: %d estimated debt: %s in progress: %d (%s)\n",
		redact.Safe(m.Compact.Count),
		humanize.Bytes.Uint64(m.Compact.EstimatedDebt),
		redact.Safe(m.Compact.NumInProgress),
		humanize.Bytes.Int64(m.Compact.InProgressBytes))

	w.Printf(" default: %d delete: %d elision: %d move: %d read: %d rewrite: %d multi-level: %d\n",
		redact.Safe(m.Compact.DefaultCount),
		redact.Safe(m.Compact.DeleteOnlyCount),
		redact.Safe(m.Compact.ElisionOnlyCount),
		redact.Safe(m.Compact.MoveCount),
		redact.Safe(m.Compact.ReadCount),
		redact.Safe(m.Compact.RewriteCount),
		redact.Safe(m.Compact.MultiLevelCount))

	w.Printf("MemTables: %d (%s) zombie: %d (%s)\n",
		redact.Safe(m.MemTable.Count),
		humanize.Bytes.Uint64(m.MemTable.Size),
		redact.Safe(m.MemTable.ZombieCount),
		humanize.Bytes.Uint64(m.MemTable.ZombieSize))

	w.Printf("Zombie tables: %d (%s)\n",
		redact.Safe(m.Table.ZombieCount),
		humanize.Bytes.Uint64(m.Table.ZombieSize))

	w.Printf("Backing tables: %d (%s)\n",
		redact.Safe(m.Table.BackingTableCount),
		humanize.Bytes.Uint64(m.Table.BackingTableSize))
	w.Printf("Virtual tables: %d (%s)\n",
		redact.Safe(m.NumVirtual()),
		humanize.Bytes.Uint64(m.VirtualSize()))

	formatCacheMetrics := func(m *CacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s) hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.Hits, m.Misses)))
	}
	formatCacheMetrics(&m.BlockCache, "Block cache")
	formatCacheMetrics(&m.TableCache, "Table cache")

	formatSharedCacheMetrics := func(w redact.SafePrinter, m *SecondaryCacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s) hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.ReadsWithFullHit, m.ReadsWithPartialHit+m.ReadsWithNoHit)))
	}
	formatSharedCacheMetrics(w, &m.SecondaryCacheMetrics, "Secondary cache")

	w.Printf("Snapshots: %d earliest seq num: %d\n",
		redact.Safe(m.Snapshots.Count),
		redact.Safe(m.Snapshots.EarliestSeqNum))

	w.Printf("Table iters: %d\n", redact.Safe(m.TableIters))
	w.Printf("Filter utility: %.1f%%\n", redact.Safe(hitRate(m.Filter.Hits, m.Filter.Misses)))
	w.Printf("Ingestions: %d as flushable: %d (%s in %d tables)\n",
		redact.Safe(m.Ingest.Count),
		redact.Safe(m.Flush.AsIngestCount),
		humanize.Bytes.Uint64(m.Flush.AsIngestBytes),
		redact.Safe(m.Flush.AsIngestTableCount))
}

func hitRate(hits, misses int64) float64 {
	return percent(hits, hits+misses)
}

func percent(numerator, denominator int64) float64 {
	if denominator == 0 {
		return 0
	}
	return 100 * float64(numerator) / float64(denominator)
}
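
// As a worked example of the two helpers above: a cache that has seen 3 hits
// and 4 misses yields hitRate(3, 4) = percent(3, 7) = 100*3/7 ≈ 42.9, which
// is how a "hit rate: 42.9%" line in the String output arises.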

// StringForTests is identical to m.String() on 64-bit platforms. It is used to
// provide a platform-independent result for tests.
func (m *Metrics) StringForTests() string {
	mCopy := *m
	if math.MaxInt == math.MaxInt32 {
		// This is the difference in Sizeof(sstable.Reader{}) between 64 and 32
		// bit platforms.
		const tableCacheSizeAdjustment = 212
		mCopy.TableCache.Size += mCopy.TableCache.Count * tableCacheSizeAdjustment
	}
	return redact.StringWithoutMarkers(&mCopy)
}