github.com/cockroachdb/pebble@v1.1.1-0.20240513155919-3622ade60459/metrics.go

// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"fmt"
	"math"
	"time"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/cache"
	"github.com/cockroachdb/pebble/internal/humanize"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/redact"
	"github.com/prometheus/client_golang/prometheus"
)

// CacheMetrics holds metrics for the block and table cache.
type CacheMetrics = cache.Metrics

// FilterMetrics holds metrics for the filter policy.
type FilterMetrics = sstable.FilterMetrics

// ThroughputMetric is a cumulative throughput metric. See the detailed
// comment in base.
type ThroughputMetric = base.ThroughputMetric

// SecondaryCacheMetrics holds metrics for the persistent secondary cache
// that caches commonly accessed blocks from blob storage on a local
// file system.
type SecondaryCacheMetrics = sharedcache.Metrics

// LevelMetrics holds per-level metrics such as the number of files and total
// size of the files, and compaction-related metrics.
type LevelMetrics struct {
	// The number of sublevels within the level. The sublevel count corresponds
	// to the read amplification for the level. An empty level will have a
	// sublevel count of 0, implying no read amplification. Only L0 will have
	// a sublevel count other than 0 or 1.
	Sublevels int32
	// The total number of files in the level.
	NumFiles int64
	// The total number of virtual sstables in the level.
	NumVirtualFiles uint64
	// The total size in bytes of the files in the level.
	Size int64
	// The total size of the virtual sstables in the level.
	VirtualSize uint64
	// The level's compaction score. This is the compensatedScoreRatio in the
	// candidateLevelInfo.
	Score float64
	// The number of incoming bytes from other levels read during
	// compactions. This excludes bytes moved and bytes ingested. For L0 this is
	// the bytes written to the WAL.
	BytesIn uint64
	// The number of bytes ingested. The sibling metric for tables is
	// TablesIngested.
	BytesIngested uint64
	// The number of bytes moved into the level by a "move" compaction. The
	// sibling metric for tables is TablesMoved.
	BytesMoved uint64
	// The number of bytes read for compactions at the level. This includes bytes
	// read from other levels (BytesIn), as well as bytes read for the level.
	BytesRead uint64
	// The number of bytes written during compactions. The sibling
	// metric for tables is TablesCompacted. This metric may be summed
	// with BytesFlushed to compute the total bytes written for the level.
	BytesCompacted uint64
	// The number of bytes written during flushes. The sibling
	// metric for tables is TablesFlushed. This metric is always
	// zero for all levels other than L0.
	BytesFlushed uint64
	// The number of sstables compacted to this level.
	TablesCompacted uint64
	// The number of sstables flushed to this level.
	TablesFlushed uint64
	// The number of sstables ingested into the level.
	TablesIngested uint64
	// The number of sstables moved to this level by a "move" compaction.
	TablesMoved uint64

	MultiLevel struct {
		// BytesInTop are the total bytes in a multilevel compaction coming from the top level.
		BytesInTop uint64

		// BytesIn, exclusively for multilevel compactions.
		BytesIn uint64

		// BytesRead, exclusively for multilevel compactions.
		BytesRead uint64
	}

	// Additional contains misc additional metrics that are not always printed.
	Additional struct {
		// The sum of Properties.ValueBlocksSize for all the sstables in this
		// level. Printed by LevelMetrics.format iff there is at least one level
		// with a non-zero value.
		ValueBlocksSize uint64
		// Cumulative metrics about bytes written to data blocks and value blocks,
		// via compactions (except move compactions) or flushes. Not printed by
		// LevelMetrics.format, but are available to sophisticated clients.
		BytesWrittenDataBlocks  uint64
		BytesWrittenValueBlocks uint64
	}
}

// Add updates the counter metrics for the level.
func (m *LevelMetrics) Add(u *LevelMetrics) {
	m.NumFiles += u.NumFiles
	m.NumVirtualFiles += u.NumVirtualFiles
	m.VirtualSize += u.VirtualSize
	m.Size += u.Size
	m.BytesIn += u.BytesIn
	m.BytesIngested += u.BytesIngested
	m.BytesMoved += u.BytesMoved
	m.BytesRead += u.BytesRead
	m.BytesCompacted += u.BytesCompacted
	m.BytesFlushed += u.BytesFlushed
	m.TablesCompacted += u.TablesCompacted
	m.TablesFlushed += u.TablesFlushed
	m.TablesIngested += u.TablesIngested
	m.TablesMoved += u.TablesMoved
	m.MultiLevel.BytesInTop += u.MultiLevel.BytesInTop
	m.MultiLevel.BytesRead += u.MultiLevel.BytesRead
	m.MultiLevel.BytesIn += u.MultiLevel.BytesIn
	m.Additional.BytesWrittenDataBlocks += u.Additional.BytesWrittenDataBlocks
	m.Additional.BytesWrittenValueBlocks += u.Additional.BytesWrittenValueBlocks
	m.Additional.ValueBlocksSize += u.Additional.ValueBlocksSize
}
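
// An illustrative sketch of how Add is used (values hypothetical): the
// aggregate "total" row of the metrics table is built by accumulating each
// level's counters into a single LevelMetrics.
//
//	var total LevelMetrics
//	l0 := LevelMetrics{NumFiles: 4, BytesFlushed: 1 << 20}
//	l6 := LevelMetrics{NumFiles: 100, BytesCompacted: 10 << 20}
//	total.Add(&l0)
//	total.Add(&l6)
//	// total.NumFiles == 104; total.BytesFlushed and total.BytesCompacted
//	// carry the respective sums.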

// WriteAmp computes the write amplification for compactions at this
// level. Computed as (BytesFlushed + BytesCompacted) / BytesIn.
func (m *LevelMetrics) WriteAmp() float64 {
	if m.BytesIn == 0 {
		return 0
	}
	return float64(m.BytesFlushed+m.BytesCompacted) / float64(m.BytesIn)
}
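
// For example (hypothetical numbers): a level that received 100 incoming
// bytes (BytesIn) and wrote 250 bytes compacting them (BytesCompacted, with
// BytesFlushed zero for levels other than L0) has a write amplification of
// 250/100 = 2.5:
//
//	m := LevelMetrics{BytesIn: 100, BytesCompacted: 250}
//	_ = m.WriteAmp() // 2.5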

// Metrics holds metrics for various subsystems of the DB such as the Cache,
// Compactions, WAL, and per-Level metrics.
//
// TODO(peter): The testing of these metrics is relatively weak. There should
// be testing that performs various operations on a DB and verifies that the
// metrics reflect those operations.
type Metrics struct {
	BlockCache CacheMetrics

	Compact struct {
		// The total number of compactions, and per-compaction type counts.
		Count             int64
		DefaultCount      int64
		DeleteOnlyCount   int64
		ElisionOnlyCount  int64
		MoveCount         int64
		ReadCount         int64
		RewriteCount      int64
		MultiLevelCount   int64
		CounterLevelCount int64
		// An estimate of the number of bytes that need to be compacted for the LSM
		// to reach a stable state.
		EstimatedDebt uint64
		// Number of bytes present in sstables being written by in-progress
		// compactions. This value will be zero if there are no in-progress
		// compactions.
		InProgressBytes int64
		// Number of compactions that are in-progress.
		NumInProgress int64
		// MarkedFiles is a count of files that are marked for
		// compaction. Such files are compacted in a rewrite compaction
		// when no other compactions are picked.
		MarkedFiles int
		// Duration records the cumulative duration of all compactions since the
		// database was opened.
		Duration time.Duration
	}

	Ingest struct {
		// The total number of ingestions.
		Count uint64
	}

	Flush struct {
		// The total number of flushes.
		Count           int64
		WriteThroughput ThroughputMetric
		// Number of flushes that are in-progress. In the current implementation
		// this will always be zero or one.
		NumInProgress int64
		// AsIngestCount is a monotonically increasing counter of flush operations
		// handling ingested tables.
		AsIngestCount uint64
		// AsIngestTableCount is a monotonically increasing counter of tables
		// ingested as flushables.
		AsIngestTableCount uint64
		// AsIngestBytes is a monotonically increasing counter of the bytes flushed
		// for flushables that originated as ingestion operations.
		AsIngestBytes uint64
	}

	Filter FilterMetrics

	Levels [numLevels]LevelMetrics

	MemTable struct {
		// The number of bytes allocated by memtables and large (flushable)
		// batches.
		Size uint64
		// The count of memtables.
		Count int64
		// The number of bytes present in zombie memtables which are no longer
		// referenced by the current DB state. An unbounded number of memtables
		// may be zombie if they're still in use by an iterator. One additional
		// memtable may be zombie if it's no longer in use and waiting to be
		// recycled.
		ZombieSize uint64
		// The count of zombie memtables.
		ZombieCount int64
	}

	Keys struct {
		// The approximate count of internal range key set keys in the database.
		RangeKeySetsCount uint64
		// The approximate count of internal tombstones (DEL, SINGLEDEL and
		// RANGEDEL key kinds) within the database.
		TombstoneCount uint64
		// A cumulative total number of missized DELSIZED keys encountered by
		// compactions since the database was opened.
		MissizedTombstonesCount uint64
	}

	Snapshots struct {
		// The number of currently open snapshots.
		Count int
		// The sequence number of the earliest, currently open snapshot.
		EarliestSeqNum uint64
		// A running tally of keys written to sstables during flushes or
		// compactions that would've been elided if it weren't for open
		// snapshots.
		PinnedKeys uint64
		// A running cumulative sum of the size of keys and values written to
		// sstables during flushes or compactions that would've been elided if
		// it weren't for open snapshots.
		PinnedSize uint64
	}

	Table struct {
		// The number of bytes present in obsolete tables which are no longer
		// referenced by the current DB state or any open iterators.
		ObsoleteSize uint64
		// The count of obsolete tables.
		ObsoleteCount int64
		// The number of bytes present in zombie tables which are no longer
		// referenced by the current DB state but are still in use by an iterator.
		ZombieSize uint64
		// The count of zombie tables.
		ZombieCount int64
		// The count of the backing sstables.
		BackingTableCount uint64
		// The sum of the sizes of all of the backing sstables.
		BackingTableSize uint64
	}

	TableCache CacheMetrics

	// The number of open sstable iterators.
	TableIters int64
	// Uptime is the total time since this DB was opened.
	Uptime time.Duration

	WAL struct {
		// Number of live WAL files.
		Files int64
		// Number of obsolete WAL files.
		ObsoleteFiles int64
		// Physical size of the obsolete WAL files.
		ObsoletePhysicalSize uint64
		// Size of the live data in the WAL files. Note that with WAL file
		// recycling this is less than the actual on-disk size of the WAL files.
		Size uint64
		// Physical size of the WAL files on-disk. With WAL file recycling,
		// this is greater than the live data in WAL files.
		PhysicalSize uint64
		// Number of logical bytes written to the WAL.
		BytesIn uint64
		// Number of bytes written to the WAL.
		BytesWritten uint64
	}

	LogWriter struct {
		FsyncLatency prometheus.Histogram
		record.LogWriterMetrics
	}

	SecondaryCacheMetrics SecondaryCacheMetrics

	private struct {
		optionsFileSize  uint64
		manifestFileSize uint64
	}
}
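
// A minimal sketch of consuming these metrics from a client, assuming an
// open *DB named db (DB.Metrics returns a snapshot of this struct):
//
//	m := db.Metrics()
//	fmt.Printf("compactions: %d  estimated debt: %d bytes\n",
//		m.Compact.Count, m.Compact.EstimatedDebt)
//	fmt.Printf("read amp: %d  WAL size: %d bytes\n", m.ReadAmp(), m.WAL.Size)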

var (
	// FsyncLatencyBuckets are prometheus histogram buckets suitable for a histogram
	// that records latencies for fsyncs.
	FsyncLatencyBuckets = append(
		prometheus.LinearBuckets(0.0, float64(time.Microsecond*100), 50),
		prometheus.ExponentialBucketsRange(float64(time.Millisecond*5), float64(10*time.Second), 50)...,
	)
	// SecondaryCacheIOBuckets is exported from package pebble so that CRDB can
	// export metrics using these buckets.
	SecondaryCacheIOBuckets = sharedcache.IOBuckets
	// SecondaryCacheChannelWriteBuckets is exported from package pebble so that
	// CRDB can export metrics using these buckets.
	SecondaryCacheChannelWriteBuckets = sharedcache.ChannelWriteBuckets
)
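
// A sketch of wiring FsyncLatencyBuckets into a caller-owned prometheus
// histogram (the metric name is hypothetical; note the bucket bounds are in
// nanoseconds, since they are built from time.Duration values):
//
//	fsyncLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "pebble_wal_fsync_latency_nanos", // illustrative name
//		Buckets: FsyncLatencyBuckets,
//	})
//	_ = fsyncLatency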

// DiskSpaceUsage returns the total disk space used by the database in bytes,
// including live and obsolete files.
func (m *Metrics) DiskSpaceUsage() uint64 {
	var usageBytes uint64
	usageBytes += m.WAL.PhysicalSize
	usageBytes += m.WAL.ObsoletePhysicalSize
	for _, lm := range m.Levels {
		usageBytes += uint64(lm.Size)
	}
	usageBytes += m.Table.ObsoleteSize
	usageBytes += m.Table.ZombieSize
	usageBytes += m.private.optionsFileSize
	usageBytes += m.private.manifestFileSize
	usageBytes += uint64(m.Compact.InProgressBytes)
	return usageBytes
}
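
// An end-to-end usage sketch from an importing package's perspective (the
// directory name and empty options are hypothetical):
//
//	db, err := pebble.Open("demo-db", &pebble.Options{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()
//	fmt.Printf("disk usage: %d bytes\n", db.Metrics().DiskSpaceUsage())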

// NumVirtual is the number of virtual sstables in the latest version
// summed over every level in the LSM.
func (m *Metrics) NumVirtual() uint64 {
	var n uint64
	for _, level := range m.Levels {
		n += level.NumVirtualFiles
	}
	return n
}

// VirtualSize is the sum of the sizes of the virtual sstables in the
// latest version. BackingTableSize - VirtualSize gives an estimate for
// the space amplification caused by not compacting virtual sstables.
func (m *Metrics) VirtualSize() uint64 {
	var size uint64
	for _, level := range m.Levels {
		size += level.VirtualSize
	}
	return size
}
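
// For example, a client can estimate the backing-table bytes not covered by
// any virtual sstable (a rough space-amplification signal; the arithmetic
// below is the client's own, not a Pebble API):
//
//	m := db.Metrics()
//	if m.Table.BackingTableSize > m.VirtualSize() {
//		unreferenced := m.Table.BackingTableSize - m.VirtualSize()
//		_ = unreferenced
//	}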

// ReadAmp returns the current read amplification of the database.
// It's computed as the number of sublevels in L0 + the number of non-empty
// levels below L0.
func (m *Metrics) ReadAmp() int {
	var ramp int32
	for _, l := range m.Levels {
		ramp += l.Sublevels
	}
	return int(ramp)
}
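
// For example (hypothetical shape): with 2 sublevels in L0 and non-empty L5
// and L6 (each contributing Sublevels == 1), ReadAmp returns 2+1+1 = 4.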

// Total returns the sum of the per-level metrics and WAL metrics.
func (m *Metrics) Total() LevelMetrics {
	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	return total
}
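
// A sketch of deriving the database-wide write amplification from the
// aggregated row, mirroring the w-amp cell of the "total" line in String():
//
//	total := m.Total()
//	wamp := total.WriteAmp() // (BytesFlushed+BytesCompacted)/BytesIn over the whole LSM
//	_ = wamp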

// String pretty-prints the metrics as below:
//
//	      |                             |       |       |   ingested   |     moved    |    written   |       |    amp
//	level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w
//	------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------
//	    0 |   101   102B     0B       0 | 103.0 |  104B |   112   104B |   113   106B |   221   217B |  107B |   1  2.1
//	    1 |   201   202B     0B       0 | 203.0 |  204B |   212   204B |   213   206B |   421   417B |  207B |   2  2.0
//	    2 |   301   302B     0B       0 | 303.0 |  304B |   312   304B |   313   306B |   621   617B |  307B |   3  2.0
//	    3 |   401   402B     0B       0 | 403.0 |  404B |   412   404B |   413   406B |   821   817B |  407B |   4  2.0
//	    4 |   501   502B     0B       0 | 503.0 |  504B |   512   504B |   513   506B |  1.0K  1017B |  507B |   5  2.0
//	    5 |   601   602B     0B       0 | 603.0 |  604B |   612   604B |   613   606B |  1.2K  1.2KB |  607B |   6  2.0
//	    6 |   701   702B     0B       0 |     - |  704B |   712   704B |   713   706B |  1.4K  1.4KB |  707B |   7  2.0
//	total |  2.8K  2.7KB     0B       0 |     - | 2.8KB |  2.9K  2.8KB |  2.9K  2.8KB |  5.7K  8.4KB | 2.8KB |  28  3.0
//	-------------------------------------------------------------------------------------------------------------------
//	WAL: 22 files (24B)  in: 25B  written: 26B (4% overhead)
//	Flushes: 8
//	Compactions: 5  estimated debt: 6B  in progress: 2 (7B)
//	default: 27  delete: 28  elision: 29  move: 30  read: 31  rewrite: 32  multi-level: 33
//	MemTables: 12 (11B)  zombie: 14 (13B)
//	Zombie tables: 16 (15B)
//	Backing tables: 0 (0B)
//	Virtual tables: 0 (0B)
//	Block cache: 2 entries (1B)  hit rate: 42.9%
//	Table cache: 18 entries (17B)  hit rate: 48.7%
//	Secondary cache: 40 entries (40B)  hit rate: 49.9%
//	Snapshots: 4  earliest seq num: 1024
//	Table iters: 21
//	Filter utility: 47.4%
//	Ingestions: 27  as flushable: 36 (34B in 35 tables)
func (m *Metrics) String() string {
	return redact.StringWithoutMarkers(m)
}
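
// Because Metrics implements redact.SafeFormatter, a redaction-aware caller
// can also produce a redactable form (a sketch using the
// github.com/cockroachdb/redact API):
//
//	plain := m.String()            // marker-free text, as documented above
//	redactable := redact.Sprint(m) // redact.RedactableString retaining markers
//	_, _ = plain, redactable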

var _ redact.SafeFormatter = &Metrics{}

// SafeFormat implements redact.SafeFormatter.
func (m *Metrics) SafeFormat(w redact.SafePrinter, _ rune) {
	// NB: Pebble does not make any assumptions as to which Go primitive types
	// have been registered as safe with redact.RegisterSafeType and does not
	// register any types itself. Some of the calls to `redact.Safe`, etc are
	// superfluous in the context of CockroachDB, which registers all the Go
	// numeric types as safe.

	// TODO(jackson): There are a few places where we use redact.SafeValue
	// instead of redact.RedactableString. This is necessary because of a bug
	// whereby formatting a redact.RedactableString argument does not respect
	// width specifiers. When the issue is fixed, we can convert these to
	// RedactableStrings. https://github.com/cockroachdb/redact/issues/17

	multiExists := m.Compact.MultiLevelCount > 0
	appendIfMulti := func(line redact.SafeString) {
		if multiExists {
			w.SafeString(line)
		}
	}
	newline := func() {
		w.SafeString("\n")
	}

	w.SafeString("      |                             |       |       |   ingested   |     moved    |    written   |       |    amp")
	appendIfMulti("   |     multilevel")
	newline()
	w.SafeString("level | tables  size val-bl vtables | score |   in  | tables  size | tables  size | tables  size |  read |   r   w")
	appendIfMulti("  |    top   in  read")
	newline()
	w.SafeString("------+-----------------------------+-------+-------+--------------+--------------+--------------+-------+---------")
	appendIfMulti("-+------------------")
	newline()

	// formatRow prints out a row of the table.
	formatRow := func(m *LevelMetrics, score float64) {
		scoreStr := "-"
		if !math.IsNaN(score) {
			// Try to keep the string no longer than 5 characters.
			switch {
			case score < 99.995:
				scoreStr = fmt.Sprintf("%.2f", score)
			case score < 999.95:
				scoreStr = fmt.Sprintf("%.1f", score)
			default:
				scoreStr = fmt.Sprintf("%.0f", score)
			}
		}
		var wampStr string
		if wamp := m.WriteAmp(); wamp > 99.5 {
			wampStr = fmt.Sprintf("%.0f", wamp)
		} else {
			wampStr = fmt.Sprintf("%.1f", wamp)
		}

		w.Printf("| %5s %6s %6s %7s | %5s | %5s | %5s %6s | %5s %6s | %5s %6s | %5s | %3d %4s",
			humanize.Count.Int64(m.NumFiles),
			humanize.Bytes.Int64(m.Size),
			humanize.Bytes.Uint64(m.Additional.ValueBlocksSize),
			humanize.Count.Uint64(m.NumVirtualFiles),
			redact.Safe(scoreStr),
			humanize.Bytes.Uint64(m.BytesIn),
			humanize.Count.Uint64(m.TablesIngested),
			humanize.Bytes.Uint64(m.BytesIngested),
			humanize.Count.Uint64(m.TablesMoved),
			humanize.Bytes.Uint64(m.BytesMoved),
			humanize.Count.Uint64(m.TablesFlushed+m.TablesCompacted),
			humanize.Bytes.Uint64(m.BytesFlushed+m.BytesCompacted),
			humanize.Bytes.Uint64(m.BytesRead),
			redact.Safe(m.Sublevels),
			redact.Safe(wampStr))

		if multiExists {
			w.Printf(" | %5s %5s %5s",
				humanize.Bytes.Uint64(m.MultiLevel.BytesInTop),
				humanize.Bytes.Uint64(m.MultiLevel.BytesIn),
				humanize.Bytes.Uint64(m.MultiLevel.BytesRead))
		}
		newline()
	}

	var total LevelMetrics
	for level := 0; level < numLevels; level++ {
		l := &m.Levels[level]
		w.Printf("%5d ", redact.Safe(level))

		// Format the score.
		score := math.NaN()
		if level < numLevels-1 {
			score = l.Score
		}
		formatRow(l, score)
		total.Add(l)
		total.Sublevels += l.Sublevels
	}
	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
	total.BytesIn = m.WAL.BytesWritten + total.BytesIngested
	// Add the total bytes-in to the total bytes-flushed. This is to account for
	// the bytes written to the log and bytes written externally and then
	// ingested.
	total.BytesFlushed += total.BytesIn
	w.SafeString("total ")
	formatRow(&total, math.NaN())

	w.SafeString("-------------------------------------------------------------------------------------------------------------------")
	appendIfMulti("--------------------")
	newline()
	w.Printf("WAL: %d files (%s)  in: %s  written: %s (%.0f%% overhead)\n",
		redact.Safe(m.WAL.Files),
		humanize.Bytes.Uint64(m.WAL.Size),
		humanize.Bytes.Uint64(m.WAL.BytesIn),
		humanize.Bytes.Uint64(m.WAL.BytesWritten),
		redact.Safe(percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))))

	w.Printf("Flushes: %d\n", redact.Safe(m.Flush.Count))

	w.Printf("Compactions: %d  estimated debt: %s  in progress: %d (%s)\n",
		redact.Safe(m.Compact.Count),
		humanize.Bytes.Uint64(m.Compact.EstimatedDebt),
		redact.Safe(m.Compact.NumInProgress),
		humanize.Bytes.Int64(m.Compact.InProgressBytes))

	w.Printf("             default: %d  delete: %d  elision: %d  move: %d  read: %d  rewrite: %d  multi-level: %d\n",
		redact.Safe(m.Compact.DefaultCount),
		redact.Safe(m.Compact.DeleteOnlyCount),
		redact.Safe(m.Compact.ElisionOnlyCount),
		redact.Safe(m.Compact.MoveCount),
		redact.Safe(m.Compact.ReadCount),
		redact.Safe(m.Compact.RewriteCount),
		redact.Safe(m.Compact.MultiLevelCount))

	w.Printf("MemTables: %d (%s)  zombie: %d (%s)\n",
		redact.Safe(m.MemTable.Count),
		humanize.Bytes.Uint64(m.MemTable.Size),
		redact.Safe(m.MemTable.ZombieCount),
		humanize.Bytes.Uint64(m.MemTable.ZombieSize))

	w.Printf("Zombie tables: %d (%s)\n",
		redact.Safe(m.Table.ZombieCount),
		humanize.Bytes.Uint64(m.Table.ZombieSize))

	w.Printf("Backing tables: %d (%s)\n",
		redact.Safe(m.Table.BackingTableCount),
		humanize.Bytes.Uint64(m.Table.BackingTableSize))
	w.Printf("Virtual tables: %d (%s)\n",
		redact.Safe(m.NumVirtual()),
		humanize.Bytes.Uint64(m.VirtualSize()))

	formatCacheMetrics := func(m *CacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s)  hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.Hits, m.Misses)))
	}
	formatCacheMetrics(&m.BlockCache, "Block cache")
	formatCacheMetrics(&m.TableCache, "Table cache")

	formatSharedCacheMetrics := func(w redact.SafePrinter, m *SecondaryCacheMetrics, name redact.SafeString) {
		w.Printf("%s: %s entries (%s)  hit rate: %.1f%%\n",
			name,
			humanize.Count.Int64(m.Count),
			humanize.Bytes.Int64(m.Size),
			redact.Safe(hitRate(m.ReadsWithFullHit, m.ReadsWithPartialHit+m.ReadsWithNoHit)))
	}
	formatSharedCacheMetrics(w, &m.SecondaryCacheMetrics, "Secondary cache")

	w.Printf("Snapshots: %d  earliest seq num: %d\n",
		redact.Safe(m.Snapshots.Count),
		redact.Safe(m.Snapshots.EarliestSeqNum))

	w.Printf("Table iters: %d\n", redact.Safe(m.TableIters))
	w.Printf("Filter utility: %.1f%%\n", redact.Safe(hitRate(m.Filter.Hits, m.Filter.Misses)))
	w.Printf("Ingestions: %d  as flushable: %d (%s in %d tables)\n",
		redact.Safe(m.Ingest.Count),
		redact.Safe(m.Flush.AsIngestCount),
		humanize.Bytes.Uint64(m.Flush.AsIngestBytes),
		redact.Safe(m.Flush.AsIngestTableCount))
}

func hitRate(hits, misses int64) float64 {
	return percent(hits, hits+misses)
}

func percent(numerator, denominator int64) float64 {
	if denominator == 0 {
		return 0
	}
	return 100 * float64(numerator) / float64(denominator)
}
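
// Worked example: hitRate(90, 10) == percent(90, 100) == 90.0, and a zero
// denominator (no hits and no misses) yields 0 rather than NaN.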

// StringForTests is identical to m.String() on 64-bit platforms. It is used to
// provide a platform-independent result for tests.
func (m *Metrics) StringForTests() string {
	mCopy := *m
	if math.MaxInt == math.MaxInt32 {
		// This is the difference in Sizeof(sstable.Reader{}) between 64 and 32
		// bit platforms.
		const tableCacheSizeAdjustment = 212
		mCopy.TableCache.Size += mCopy.TableCache.Count * tableCacheSizeAdjustment
	}
	return redact.StringWithoutMarkers(&mCopy)
}